From pypy.commits at gmail.com Fri Jan 1 03:32:33 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 01 Jan 2016 00:32:33 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: More precise logic for constant folding Message-ID: <568639a1.87c21c0a.faf38.50bd@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81515:7d8cd001c720 Date: 2015-12-31 14:31 -0500 http://bitbucket.org/pypy/pypy/changeset/7d8cd001c720/ Log: More precise logic for constant folding diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -493,7 +493,7 @@ return pendingfields def optimize_GETFIELD_GC_I(self, op): - if op.is_always_pure() and self.get_constant_box(op.getarg(0)): + if op.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None: resbox = self.optimizer.constant_fold(op) self.optimizer.make_constant(op, resbox) return diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -375,6 +375,7 @@ if (box.type == 'i' and box.get_forwarded() and box.get_forwarded().is_constant()): return ConstInt(box.get_forwarded().getint()) + return None #self.ensure_imported(value) def get_newoperations(self): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -666,7 +666,7 @@ if fielddescr.is_always_pure() != False and isinstance(box, ConstPtr): # if 'box' is directly a ConstPtr, bypass the heapcache completely resvalue = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_F, fielddescr, box) + rop.GETFIELD_GC_F, fielddescr, box) return ConstFloat(resvalue) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_F, box, fielddescr, 'f') @@ -676,7 +676,7 @@ if fielddescr.is_always_pure() != False and isinstance(box, ConstPtr): # if 'box' is directly a ConstPtr, bypass the heapcache completely val = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_R, fielddescr, box) + rop.GETFIELD_GC_R, fielddescr, box) return ConstPtr(val) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_R, box, fielddescr, 'r') From pypy.commits at gmail.com Fri Jan 1 03:32:35 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 01 Jan 2016 00:32:35 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Log getfield operations as pure baesd on their descriptor Message-ID: <568639a3.2815c20a.d8200.ffffc822@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81516:e05d86ae8016 Date: 2016-01-01 03:06 -0500 http://bitbucket.org/pypy/pypy/changeset/e05d86ae8016/ Log: Log getfield operations as pure baesd on their descriptor diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -188,6 +188,7 @@ else: res = "" is_guard = op.is_guard() + is_pure = "" if op.getdescr() is not None: descr = op.getdescr() if is_guard and self.guard_number: @@ -195,6 +196,8 @@ r = "" % hash else: r = self.repr_of_descr(descr) + if op.is_getfield() and op.is_always_pure(): + is_pure = "_pure" if args: args += ', descr=' + r else: @@ -204,7 +207,7 @@ for arg in op.getfailargs()]) + ']' else: fail_args = '' - return s_offset + res + op.getopname() + '(' + args + ')' + fail_args + return s_offset + res + op.getopname() + is_pure + '(' + args + ')' + fail_args def _log_operations(self, inputargs, operations, ops_offset=None, From pypy.commits at gmail.com Fri Jan 1 03:32:38 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 01 Jan 2016 00:32:38 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Cleanup logic and include GETFIELDS in list of descriptor based pure operations Message-ID: <568639a6.a415c20a.4d933.52ed@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81518:8322672559df Date: 2016-01-01 03:31 -0500 http://bitbucket.org/pypy/pypy/changeset/8322672559df/ Log: Cleanup logic and include GETFIELDS in list of descriptor based pure operations diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -653,7 +653,7 @@ @arguments("box", "descr") def opimpl_getfield_gc_i(self, box, fielddescr): - if fielddescr.is_always_pure() != False and isinstance(box, ConstPtr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): # if 'box' is directly a ConstPtr, bypass the heapcache completely resbox = executor.execute(self.metainterp.cpu, self.metainterp, rop.GETFIELD_GC_I, fielddescr, box) @@ -663,7 +663,7 @@ @arguments("box", "descr") def opimpl_getfield_gc_f(self, box, fielddescr): - if fielddescr.is_always_pure() != False and isinstance(box, ConstPtr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): # if 'box' is directly a ConstPtr, bypass the heapcache completely resvalue = executor.execute(self.metainterp.cpu, self.metainterp, rop.GETFIELD_GC_F, fielddescr, box) @@ -673,7 +673,7 @@ @arguments("box", "descr") def opimpl_getfield_gc_r(self, box, fielddescr): - if fielddescr.is_always_pure() != False and isinstance(box, ConstPtr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): # if 'box' is directly a ConstPtr, bypass the heapcache completely val = executor.execute(self.metainterp.cpu, self.metainterp, rop.GETFIELD_GC_R, fielddescr, box) @@ -2102,6 +2102,9 @@ if (opnum == rop.GETFIELD_RAW_I or opnum == rop.GETFIELD_RAW_R or opnum == rop.GETFIELD_RAW_F or + opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_R or + opnum == rop.GETFIELD_GC_F or opnum == rop.GETARRAYITEM_RAW_I or opnum == rop.GETARRAYITEM_RAW_F): is_pure = descr.is_always_pure() From pypy.commits at gmail.com Fri Jan 1 03:32:36 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 01 Jan 2016 00:32:36 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Treat quasiimmut fields as pure for optimization purposes Message-ID: <568639a4.89dec20a.7b97c.62ca@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81517:3113c5395a56 Date: 2016-01-01 03:07 -0500 http://bitbucket.org/pypy/pypy/changeset/3113c5395a56/ Log: Treat quasiimmut fields as pure for optimization purposes diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -152,7 +152,7 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) self.index = heaptracker.get_fielddescr_index_in(S, fieldname) - self._is_pure = S._immutable_field(fieldname) + self._is_pure = S._immutable_field(fieldname) != False def is_always_pure(self): return self._is_pure diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -200,7 +200,7 @@ flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) index_in_parent = heaptracker.get_fielddescr_index_in(STRUCT, fieldname) - is_pure = bool(STRUCT._immutable_field(fieldname)) + is_pure = STRUCT._immutable_field(fieldname) != False fielddescr = FieldDescr(name, offset, size, flag, index_in_parent, is_pure) cachedict = cache.setdefault(STRUCT, {}) From pypy.commits at gmail.com Fri Jan 1 05:11:11 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 02:11:11 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Add a simple test (cpython 2.7 only) Message-ID: <568650bf.863f1c0a.cca08.ffff9efa@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2502:ab417d35f58c Date: 2016-01-01 11:10 +0100 http://bitbucket.org/cffi/cffi/changeset/ab417d35f58c/ Log: Add a simple test (cpython 2.7 only) diff --git a/testing/embedding/__init__.py b/testing/embedding/__init__.py new file mode 100644 diff --git a/testing/embedding/add1-test.c b/testing/embedding/add1-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/add1-test.c @@ -0,0 +1,13 @@ +#include + +extern int add1(int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add1(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py new file mode 100644 --- /dev/null +++ b/testing/embedding/add1.py @@ -0,0 +1,23 @@ +import cffi + +ffi = cffi.FFI() + +ffi.cdef(""" + extern "Python" int add1(int, int); +""", dllexport=True) + +ffi.embedding_init_code(""" + print("preparing") + + int(ord("A")) # check that built-ins are there + + @ffi.def_extern() + def add1(x, y): + print "adding", x, "and", y + return x + y +""") + +ffi.set_source("_add1_cffi", """ +""") + +ffi.compile() diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py new file mode 100644 --- /dev/null +++ b/testing/embedding/test_basic.py @@ -0,0 +1,61 @@ +import sys, os +import shutil, subprocess +from testing.udir import udir + +local_dir = os.path.dirname(os.path.abspath(__file__)) + + +class EmbeddingTests: + _compiled_modules = set() + + def get_path(self): + return str(udir.ensure('embedding', dir=True)) + + def _run(self, args, env=None): + print(args) + popen = subprocess.Popen(args, env=env, cwd=self.get_path()) + err = popen.wait() + if err: + raise OSError("popen failed with exit code %r: %r" % ( + err, args)) + + def prepare_module(self, name): + if name not in self._compiled_modules: + path = self.get_path() + filename = '%s.py' % name + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + self._run([sys.executable, os.path.join(local_dir, filename)], + env=env) + self._compiled_modules.add(name) + + def compile(self, name, modules): + path = self.get_path() + filename = '%s.c' % name + shutil.copy(os.path.join(local_dir, filename), path) + self._run(['gcc', filename, '-o', name, '-L.'] + + ['%s.so' % modname for modname in modules] + + ['-lpython2.7']) + + def execute(self, name): + path = self.get_path() + env = os.environ.copy() + env['LD_LIBRARY_PATH'] = path + popen = subprocess.Popen([name], cwd=path, stdout=subprocess.PIPE, + env=env) + result = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("%r failed with exit code %r" % (name, err)) + return result + + +class TestBasic(EmbeddingTests): + def test_basic(self): + self.prepare_module('add1') + self.compile('add1-test', ['_add1_cffi']) + output = self.execute('add1-test') + assert output == ("preparing\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") From pypy.commits at gmail.com Fri Jan 1 05:14:35 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 02:14:35 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Avoid setting LD_LIBRARY_PATH Message-ID: <5686518b.022f1c0a.fa884.53ff@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2503:7542730a8ff3 Date: 2016-01-01 11:14 +0100 http://bitbucket.org/cffi/cffi/changeset/7542730a8ff3/ Log: Avoid setting LD_LIBRARY_PATH diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -35,14 +35,11 @@ shutil.copy(os.path.join(local_dir, filename), path) self._run(['gcc', filename, '-o', name, '-L.'] + ['%s.so' % modname for modname in modules] + - ['-lpython2.7']) + ['-lpython2.7', '-Wl,-rpath=$ORIGIN/']) def execute(self, name): path = self.get_path() - env = os.environ.copy() - env['LD_LIBRARY_PATH'] = path - popen = subprocess.Popen([name], cwd=path, stdout=subprocess.PIPE, - env=env) + popen = subprocess.Popen([name], cwd=path, stdout=subprocess.PIPE) result = popen.stdout.read() err = popen.wait() if err: From pypy.commits at gmail.com Fri Jan 1 05:20:53 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 02:20:53 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: A test with two modules. Message-ID: <56865305.11301c0a.b4ee7.7364@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2504:6239c1250e0b Date: 2016-01-01 11:20 +0100 http://bitbucket.org/cffi/cffi/changeset/6239c1250e0b/ Log: A test with two modules. diff --git a/testing/embedding/add2-test.c b/testing/embedding/add2-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/add2-test.c @@ -0,0 +1,14 @@ +#include + +extern int add1(int, int); +extern int add2(int, int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add2(100, -5, -20); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py new file mode 100644 --- /dev/null +++ b/testing/embedding/add2.py @@ -0,0 +1,21 @@ +import cffi + +ffi = cffi.FFI() + +ffi.cdef(""" + extern "Python" int add2(int, int, int); +""", dllexport=True) + +ffi.embedding_init_code(""" + print("preparing ADD2") + + @ffi.def_extern() + def add2(x, y, z): + print "adding", x, "and", y, "and", z + return x + y + z +""") + +ffi.set_source("_add2_cffi", """ +""") + +ffi.compile() diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -56,3 +56,14 @@ "adding 40 and 2\n" "adding 100 and -5\n" "got: 42 95\n") + + def test_two_modules(self): + self.prepare_module('add1') + self.prepare_module('add2') + self.compile('add2-test', ['_add1_cffi', '_add2_cffi']) + output = self.execute('add2-test') + assert output == ("preparing\n" + "adding 40 and 2\n" + "preparing ADD2\n" + "adding 100 and -5 and -20\n" + "got: 42 75\n") From pypy.commits at gmail.com Fri Jan 1 05:36:30 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 02:36:30 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: test multiple threads all doing the initial call to an "extern Python" Message-ID: <568656ae.2815c20a.d8200.ffffe01c@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2505:5db2f5b5d3ab Date: 2016-01-01 11:36 +0100 http://bitbucket.org/cffi/cffi/changeset/5db2f5b5d3ab/ Log: test multiple threads all doing the initial call to an "extern Python" function in parallel diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py --- a/testing/embedding/add1.py +++ b/testing/embedding/add1.py @@ -6,14 +6,20 @@ extern "Python" int add1(int, int); """, dllexport=True) -ffi.embedding_init_code(""" - print("preparing") +ffi.embedding_init_code(r""" + import sys, time + sys.stdout.write("preparing") + for i in range(3): + sys.stdout.flush() + time.sleep(0.02) + sys.stdout.write(".") + sys.stdout.write("\n") int(ord("A")) # check that built-ins are there @ffi.def_extern() def add1(x, y): - print "adding", x, "and", y + sys.stdout.write("adding %d and %d\n" % (x, y)) return x + y """) diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -29,13 +29,13 @@ env=env) self._compiled_modules.add(name) - def compile(self, name, modules): + def compile(self, name, modules, extra=[]): path = self.get_path() filename = '%s.c' % name shutil.copy(os.path.join(local_dir, filename), path) - self._run(['gcc', filename, '-o', name, '-L.'] + + self._run(['gcc', '-g', filename, '-o', name, '-L.'] + ['%s.so' % modname for modname in modules] + - ['-lpython2.7', '-Wl,-rpath=$ORIGIN/']) + ['-lpython2.7', '-Wl,-rpath=$ORIGIN/'] + extra) def execute(self, name): path = self.get_path() @@ -52,7 +52,7 @@ self.prepare_module('add1') self.compile('add1-test', ['_add1_cffi']) output = self.execute('add1-test') - assert output == ("preparing\n" + assert output == ("preparing...\n" "adding 40 and 2\n" "adding 100 and -5\n" "got: 42 95\n") @@ -62,7 +62,7 @@ self.prepare_module('add2') self.compile('add2-test', ['_add1_cffi', '_add2_cffi']) output = self.execute('add2-test') - assert output == ("preparing\n" + assert output == ("preparing...\n" "adding 40 and 2\n" "preparing ADD2\n" "adding 100 and -5 and -20\n" diff --git a/testing/embedding/test_thread.py b/testing/embedding/test_thread.py new file mode 100644 --- /dev/null +++ b/testing/embedding/test_thread.py @@ -0,0 +1,13 @@ +from testing.embedding.test_basic import EmbeddingTests + + +class TestThread(EmbeddingTests): + def test_first_calls_in_parallel(self): + self.prepare_module('add1') + self.compile('thread1-test', ['_add1_cffi'], ['-pthread']) + for i in range(5): + output = self.execute('thread1-test') + assert output == ("starting\n" + "preparing...\n" + + "adding 40 and 2\n" * 10 + + "done\n") diff --git a/testing/embedding/thread1-test.c b/testing/embedding/thread1-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/thread1-test.c @@ -0,0 +1,43 @@ +#include +#include +#include +#include + +#define NTHREADS 10 + + +extern int add1(int, int); + +static sem_t done; + + +static void *start_routine(void *arg) +{ + int x, y, status; + x = add1(40, 2); + assert(x == 42); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + for (i = 0; i < NTHREADS; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 1; i <= NTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} From pypy.commits at gmail.com Fri Jan 1 05:45:51 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 02:45:51 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: test for not invoking the init code recursively Message-ID: <568658df.42661c0a.ff609.6863@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2506:1bd9d4b7e959 Date: 2016-01-01 11:45 +0100 http://bitbucket.org/cffi/cffi/changeset/1bd9d4b7e959/ Log: test for not invoking the init code recursively diff --git a/testing/embedding/add_recursive-test.c b/testing/embedding/add_recursive-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/add_recursive-test.c @@ -0,0 +1,20 @@ +#include + +extern int add_rec(int, int); +extern int (*my_callback)(int); + +static int some_callback(int x) +{ + printf("some_callback(%d)\n", x); + return add_rec(x, 9); +} + +int main(void) +{ + int x, y; + my_callback = some_callback; + x = add_rec(40, 2); + y = add_rec(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/testing/embedding/add_recursive.py b/testing/embedding/add_recursive.py new file mode 100644 --- /dev/null +++ b/testing/embedding/add_recursive.py @@ -0,0 +1,26 @@ +import cffi + +ffi = cffi.FFI() + +ffi.cdef(""" + int (*my_callback)(int); + extern "Python" int add_rec(int, int); +""", dllexport=True) + +ffi.embedding_init_code(r""" + print "preparing REC" + + @ffi.def_extern() + def add_rec(x, y): + print "adding %d and %d" % (x, y) + return x + y + + x = lib.my_callback(400) + print '<<< %d >>>' % (x,) +""") + +ffi.set_source("_add_recursive_cffi", """ +int (*my_callback)(int); +""") + +ffi.compile() diff --git a/testing/embedding/test_recursive.py b/testing/embedding/test_recursive.py new file mode 100644 --- /dev/null +++ b/testing/embedding/test_recursive.py @@ -0,0 +1,15 @@ +from testing.embedding.test_basic import EmbeddingTests + + +class TestRecursive(EmbeddingTests): + def test_recursive(self): + self.prepare_module('add_recursive') + self.compile('add_recursive-test', ['_add_recursive_cffi']) + output = self.execute('add_recursive-test') + assert output == ("preparing REC\n" + "some_callback(400)\n" + "adding 400 and 9\n" + "<<< 409 >>>\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") From pypy.commits at gmail.com Fri Jan 1 06:10:35 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 03:10:35 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: A test of loading different cffi embedded modules in different threads. Message-ID: <56865eab.83e01c0a.604e0.6184@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2508:d7d65b08d388 Date: 2016-01-01 12:10 +0100 http://bitbucket.org/cffi/cffi/changeset/d7d65b08d388/ Log: A test of loading different cffi embedded modules in different threads. Test fails. diff --git a/testing/embedding/test_thread.py b/testing/embedding/test_thread.py --- a/testing/embedding/test_thread.py +++ b/testing/embedding/test_thread.py @@ -11,3 +11,10 @@ "preparing...\n" + "adding 40 and 2\n" * 10 + "done\n") + + def test_init_different_modules_in_different_threads(self): + self.prepare_module('add1') + self.prepare_module('add2') + self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], ['-pthread']) + output = self.execute('thread2-test') + assert output == XXX diff --git a/testing/embedding/thread1-test.c b/testing/embedding/thread1-test.c --- a/testing/embedding/thread1-test.c +++ b/testing/embedding/thread1-test.c @@ -34,7 +34,7 @@ status = pthread_create(&th, NULL, start_routine, NULL); assert(status == 0); } - for (i = 1; i <= NTHREADS; i++) { + for (i = 0; i < NTHREADS; i++) { status = sem_wait(&done); assert(status == 0); } diff --git a/testing/embedding/thread2-test.c b/testing/embedding/thread2-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/thread2-test.c @@ -0,0 +1,54 @@ +#include +#include +#include +#include + +extern int add1(int, int); +extern int add2(int, int, int); + +static sem_t done; + + +static void *start_routine_1(void *arg) +{ + int x, status; + x = add1(40, 2); + assert(x == 42); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +static void *start_routine_2(void *arg) +{ + int x, status; + x = add2(1000, 200, 30); + assert(x == 1230); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + status = pthread_create(&th, NULL, start_routine_1, NULL); + assert(status == 0); + status = pthread_create(&th, NULL, start_routine_2, NULL); + assert(status == 0); + + for (i = 0; i < 2; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} From pypy.commits at gmail.com Fri Jan 1 06:10:33 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 03:10:33 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: fix docstring Message-ID: <56865ea9.85e41c0a.a3d56.ffffd114@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2507:a0c1877f9051 Date: 2016-01-01 11:50 +0100 http://bitbucket.org/cffi/cffi/changeset/a0c1877f9051/ Log: fix docstring diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -66,7 +66,7 @@ while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { /* should ideally do a spin loop instruction here, but hard to do it portably and doesn't really matter I - think: PyEval_InitThreads() should be very fast, and + think: pthread_mutex_init() should be very fast, and this is only run at start-up anyway. */ } From pypy.commits at gmail.com Fri Jan 1 06:13:20 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 03:13:20 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: An apparently different way of failing Message-ID: <56865f50.4473c20a.ddf72.6ff7@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2509:3d91e4c3fc69 Date: 2016-01-01 12:13 +0100 http://bitbucket.org/cffi/cffi/changeset/3d91e4c3fc69/ Log: An apparently different way of failing diff --git a/testing/embedding/test_thread.py b/testing/embedding/test_thread.py --- a/testing/embedding/test_thread.py +++ b/testing/embedding/test_thread.py @@ -18,3 +18,11 @@ self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], ['-pthread']) output = self.execute('thread2-test') assert output == XXX + + def test_next_issue(self): + self.prepare_module('add1') + self.prepare_module('add2') + self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], + ['-pthread', '-DT2TEST_AGAIN_ADD1']) + output = self.execute('thread2-test') + assert output == XXX diff --git a/testing/embedding/thread2-test.c b/testing/embedding/thread2-test.c --- a/testing/embedding/thread2-test.c +++ b/testing/embedding/thread2-test.c @@ -24,6 +24,9 @@ static void *start_routine_2(void *arg) { int x, status; +#ifdef T2TEST_AGAIN_ADD1 + add1(-1, -1); +#endif x = add2(1000, 200, 30); assert(x == 1230); From pypy.commits at gmail.com Fri Jan 1 06:40:32 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 03:40:32 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Update the version numbers and make sure the _cffi_backend module is Message-ID: <568665b0.84e31c0a.fd288.7953@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2510:ba5610ed88e2 Date: 2016-01-01 12:40 +0100 http://bitbucket.org/cffi/cffi/changeset/ba5610ed88e2/ Log: Update the version numbers and make sure the _cffi_backend module is a version that supports embedding! diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6500,7 +6500,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("1.4.2"); + v = PyText_FromString("1.4.3"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/cffi1_module.c b/c/cffi1_module.c --- a/c/cffi1_module.c +++ b/c/cffi1_module.c @@ -3,7 +3,7 @@ #include "realize_c_type.c" #define CFFI_VERSION_MIN 0x2601 -#define CFFI_VERSION_MAX 0x26FF +#define CFFI_VERSION_MAX 0x27FF typedef struct FFIObject_s FFIObject; typedef struct LibObject_s LibObject; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.3", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.4.3" +__version_info__ = (1, 4, 3) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -196,7 +196,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.4.2" + "\ncompiled with cffi version: 1.4.3" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -300,6 +301,9 @@ prnt('#endif') lines = self._rel_readlines('_embedding.h') prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') @@ -394,7 +398,7 @@ prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % VERSION) + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -413,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.4.2' +release = '1.4.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,11 +51,11 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.2.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.3.tar.gz - - MD5: 81357fe5042d00650b85b728cc181df2 + - MD5: ... - - SHA: 76cff6f1ff5bfb2b9c6c8e2cfa8bf90b5c944394 + - SHA: ... * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.4.2', + version='1.4.3', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h']} if cpython else {}, diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -39,7 +39,10 @@ def execute(self, name): path = self.get_path() - popen = subprocess.Popen([name], cwd=path, stdout=subprocess.PIPE) + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + popen = subprocess.Popen([name], cwd=path, env=env, + stdout=subprocess.PIPE) result = popen.stdout.read() err = popen.wait() if err: From pypy.commits at gmail.com Fri Jan 1 08:49:41 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 05:49:41 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Fix the multithreaded initialization. Message-ID: <568683f5.034cc20a.16a73.ffff9986@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2511:99cb43d80de0 Date: 2016-01-01 14:49 +0100 http://bitbucket.org/cffi/cffi/changeset/99cb43d80de0/ Log: Fix the multithreaded initialization. diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -118,9 +118,14 @@ present .dll/.so is set up as a CPython C extension module. */ int result; + PyGILState_STATE state; PyObject *pycode=NULL, *m=NULL, *global_dict, *x; - PyEval_AcquireLock(); /* acquire the GIL */ + /* Acquire the GIL. We have no threadstate here. If Python is + already initialized, it is possible that there is already one + existing for this thread, but it is not made current now. + */ + PyEval_AcquireLock(); /* XXX use initsigs=0, which "skips initialization registration of signal handlers, which might be useful when Python is @@ -132,6 +137,20 @@ */ Py_InitializeEx(0); + /* The Py_InitializeEx() sometimes made a threadstate for us, but + not always. Indeed Py_InitializeEx() could be called and do + nothing. So do we have a threadstate, or not? We don't know, + but we can replace it with NULL in all cases. + */ + (void)PyThreadState_Swap(NULL); + + /* Now we can release the GIL and re-acquire immediately using the + logic of PyGILState(), which handles making or installing the + correct threadstate. + */ + PyEval_ReleaseLock(); + state = PyGILState_Ensure(); + /* Call the initxxx() function from the present module. It will create and initialize us as a CPython extension module, instead of letting the startup Python code do it---it might reimport @@ -175,7 +194,7 @@ done: Py_XDECREF(pycode); Py_XDECREF(m); - PyEval_ReleaseLock(); /* release the GIL */ + PyGILState_Release(state); return result; error:; diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py --- a/testing/embedding/add2.py +++ b/testing/embedding/add2.py @@ -6,12 +6,13 @@ extern "Python" int add2(int, int, int); """, dllexport=True) -ffi.embedding_init_code(""" - print("preparing ADD2") +ffi.embedding_init_code(r""" + import sys + sys.stdout.write("prepADD2\n") @ffi.def_extern() def add2(x, y, z): - print "adding", x, "and", y, "and", z + sys.stdout.write("adding %d and %d and %d\n" % (x, y, z)) return x + y + z """) diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -67,6 +67,6 @@ output = self.execute('add2-test') assert output == ("preparing...\n" "adding 40 and 2\n" - "preparing ADD2\n" + "prepADD2\n" "adding 100 and -5 and -20\n" "got: 42 75\n") diff --git a/testing/embedding/test_thread.py b/testing/embedding/test_thread.py --- a/testing/embedding/test_thread.py +++ b/testing/embedding/test_thread.py @@ -5,24 +5,44 @@ def test_first_calls_in_parallel(self): self.prepare_module('add1') self.compile('thread1-test', ['_add1_cffi'], ['-pthread']) - for i in range(5): + for i in range(50): output = self.execute('thread1-test') assert output == ("starting\n" "preparing...\n" + "adding 40 and 2\n" * 10 + "done\n") + def _take_out(self, text, content): + assert content in text + i = text.index(content) + return text[:i] + text[i+len(content):] + def test_init_different_modules_in_different_threads(self): self.prepare_module('add1') self.prepare_module('add2') self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], ['-pthread']) output = self.execute('thread2-test') - assert output == XXX + output = self._take_out(output, "preparing") + output = self._take_out(output, ".") + output = self._take_out(output, ".") + # at least the 3rd dot should be after everything from ADD2 + assert output == ("starting\n" + "prepADD2\n" + "adding 1000 and 200 and 30\n" + ".\n" + "adding 40 and 2\n" + "done\n") - def test_next_issue(self): + def test_alt_issue(self): self.prepare_module('add1') self.prepare_module('add2') self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], ['-pthread', '-DT2TEST_AGAIN_ADD1']) output = self.execute('thread2-test') - assert output == XXX + output = self._take_out(output, "adding 40 and 2\n") + assert output == ("starting\n" + "preparing...\n" + "adding -1 and -1\n" + "prepADD2\n" + "adding 1000 and 200 and 30\n" + "done\n") From pypy.commits at gmail.com Fri Jan 1 09:05:37 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 06:05:37 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: One more test, passing Message-ID: <568687b1.ca061c0a.85ab3.ffffb0b3@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2512:c742099e76a4 Date: 2016-01-01 15:05 +0100 http://bitbucket.org/cffi/cffi/changeset/c742099e76a4/ Log: One more test, passing diff --git a/testing/embedding/add3.py b/testing/embedding/add3.py new file mode 100644 --- /dev/null +++ b/testing/embedding/add3.py @@ -0,0 +1,21 @@ +import cffi + +ffi = cffi.FFI() + +ffi.cdef(""" + extern "Python" int add3(int, int, int, int); +""", dllexport=True) + +ffi.embedding_init_code(r""" + import sys + + @ffi.def_extern() + def add3(x, y, z, t): + sys.stdout.write("adding %d, %d, %d, %d\n" % (x, y, z, t)) + return x + y + z + t +""") + +ffi.set_source("_add3_cffi", """ +""") + +ffi.compile() diff --git a/testing/embedding/test_thread.py b/testing/embedding/test_thread.py --- a/testing/embedding/test_thread.py +++ b/testing/embedding/test_thread.py @@ -46,3 +46,16 @@ "prepADD2\n" "adding 1000 and 200 and 30\n" "done\n") + + def test_load_in_parallel_more(self): + self.prepare_module('add2') + self.prepare_module('add3') + self.compile('thread3-test', ['_add2_cffi', '_add3_cffi'], ['-pthread']) + for i in range(150): + output = self.execute('thread3-test') + for j in range(10): + output = self._take_out(output, "adding 40 and 2 and 100\n") + output = self._take_out(output, "adding 1000, 200, 30, 4\n") + assert output == ("starting\n" + "prepADD2\n" + "done\n") diff --git a/testing/embedding/thread3-test.c b/testing/embedding/thread3-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/thread3-test.c @@ -0,0 +1,55 @@ +#include +#include +#include +#include + +extern int add2(int, int, int); +extern int add3(int, int, int, int); + +static sem_t done; + + +static void *start_routine_2(void *arg) +{ + int x, status; + x = add2(40, 2, 100); + assert(x == 142); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +static void *start_routine_3(void *arg) +{ + int x, status; + x = add3(1000, 200, 30, 4); + assert(x == 1234); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + for (i = 0; i < 10; i++) { + status = pthread_create(&th, NULL, start_routine_2, NULL); + assert(status == 0); + status = pthread_create(&th, NULL, start_routine_3, NULL); + assert(status == 0); + } + for (i = 0; i < 20; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} From pypy.commits at gmail.com Fri Jan 1 15:18:51 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 12:18:51 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5686df2b.0357c20a.f7afb.18bb@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r686:42caf95fa698 Date: 2016-01-01 21:18 +0100 http://bitbucket.org/pypy/pypy.org/changeset/42caf95fa698/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $61639 of $105000 (58.7%) + $61658 of $105000 (58.7%)
@@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Fri Jan 1 15:29:45 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 12:29:45 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: update to cffi's current static-callback-embedding branch Message-ID: <5686e1b9.6adec20a.ad5ea.16ef@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81519:c9678b8baee7 Date: 2016-01-01 15:39 +0000 http://bitbucket.org/pypy/pypy/changeset/c9678b8baee7/ Log: update to cffi's current static-callback-embedding branch diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.2 +Version: 1.4.3 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.4.3" +__version_info__ = (1, 4, 3) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,8 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 #define _cffi_call_python \ - ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -206,7 +207,8 @@ /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org #endif diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -74,6 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None + self._embedding_init_code = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -93,7 +94,7 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False, packed=False): + def cdef(self, csource, override=False, packed=False, dllexport=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. @@ -107,7 +108,8 @@ csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed) + self._parser.parse(csource, override=override, packed=packed, + dllexport=dllexport) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -626,6 +628,19 @@ self._init_once_cache[tag] = (True, result) return result + def embedding_init_code(self, pysource): + if self._embedding_init_code is not None: + raise ValueError("embedding_init_code() can only be called once") + # check for SyntaxErrors, at least, and automatically add a + # "if 1:" line in front of the code if the whole pysource is + # indented + try: + compile(pysource, "cffi_init", "exec") + except IndentationError: + pysource = 'if 1:\n' + pysource + compile(pysource, "cffi_init", "exec") + self._embedding_init_code = pysource + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -220,8 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._override = False - self._packed = False + self._options = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -281,16 +280,15 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): - prev_override = self._override - prev_packed = self._packed + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options try: - self._override = override - self._packed = packed + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} self._internal_parse(csource) finally: - self._override = prev_override - self._packed = prev_packed + self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -377,9 +375,13 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) if self._inside_extern_python: - self._declare('extern_python ' + decl.name, tp) + if self._options['dllexport']: + tag = 'dllexport_python ' + else: + tag = 'extern_python ' else: - self._declare('function ' + decl.name, tp) + tag = 'function ' + self._declare(tag + decl.name, tp) def _parse_decl(self, decl): node = decl.type @@ -449,7 +451,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._override: + if not self._options['override']: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -728,7 +730,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._packed + tp.packed = self._options['packed'] if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -281,6 +282,29 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # + # if we have ffi._embedding_init_code, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding_init_code is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % + (self._string_literal(self.ffi._embedding_init_code),)) + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION + # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() @@ -365,17 +389,16 @@ prnt() # # the init function - base_module_name = self.module_name.split('.')[-1] prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') if self._num_externpy: prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python = ' + prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % VERSION) + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -394,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') @@ -1123,7 +1146,10 @@ assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) - def _generate_cpy_extern_python_decl(self, tp, name): + def _generate_cpy_dllexport_python_collecttype(self, tp, name): + self._generate_cpy_extern_python_collecttype(tp, name) + + def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1156,7 +1182,11 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + if dllexport: + tag = 'CFFI_DLLEXPORT' + else: + tag = 'static' + prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1174,6 +1204,9 @@ prnt() self._num_externpy += 1 + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: raise ffiplatform.VerificationError( @@ -1185,6 +1218,21 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + def _generate_cpy_dllexport_python_ctx(self, tp, name): + self._generate_cpy_extern_python_ctx(tp, name) + + def _string_literal(self, s): + def _char_repr(c): + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' + if c in '\\"?': return '\\' + c + if ' ' <= c < '\x7F': return c + if c == '\n': return '\\n' + return '\\%03o' % ord(c) + lines = [] + for line in s.splitlines(True): + lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) + return ' \\\n'.join(lines) + # ---------- # emitting the opcodes for individual types diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.4.2" +VERSION = "1.4.3" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.3", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py @@ -54,3 +54,10 @@ content = open(p).read() #v = BACKEND_VERSIONS.get(v, v) assert (('assert __version__ == "%s"' % v) in content) + +def test_embedding_h(): + parent = os.path.dirname(os.path.dirname(cffi.__file__)) + v = cffi.__version__ + p = os.path.join(parent, 'cffi', '_embedding.h') + content = open(p).read() + assert ('cffi version: %s"' % (v,)) in content From pypy.commits at gmail.com Fri Jan 1 15:29:47 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 12:29:47 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: fix Message-ID: <5686e1bb.a85fc20a.99e77.29ab@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81520:978966da7f62 Date: 2016-01-01 20:33 +0000 http://bitbucket.org/pypy/pypy/changeset/978966da7f62/ Log: fix diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -8,7 +8,7 @@ VERSION_MIN = 0x2601 -VERSION_MAX = 0x26FF +VERSION_MAX = 0x27FF VERSION_EXPORT = 0x0A03 From pypy.commits at gmail.com Fri Jan 1 15:30:20 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 01 Jan 2016 12:30:20 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: fixes Message-ID: <5686e1dc.11301c0a.b4ee7.178e@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2513:779f006c35df Date: 2016-01-01 20:33 +0000 http://bitbucket.org/cffi/cffi/changeset/779f006c35df/ Log: fixes diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -146,7 +146,8 @@ """, version='1.4.3', packages=['cffi'] if cpython else [], - package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h']} + package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h', + '_embedding.h']} if cpython else {}, zip_safe=False, diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py --- a/testing/embedding/add1.py +++ b/testing/embedding/add1.py @@ -26,4 +26,4 @@ ffi.set_source("_add1_cffi", """ """) -ffi.compile() +ffi.compile(verbose=True) From pypy.commits at gmail.com Sat Jan 2 03:42:29 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 00:42:29 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: in-progress: a comment about eggs, and hacks to run the tests on pypy (some failures so far) Message-ID: <56878d75.2968c20a.6d969.0bc2@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2514:2e002aebe5fa Date: 2016-01-02 08:46 +0000 http://bitbucket.org/cffi/cffi/changeset/2e002aebe5fa/ Log: in-progress: a comment about eggs, and hacks to run the tests on pypy (some failures so far) diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -23,6 +23,12 @@ if name not in self._compiled_modules: path = self.get_path() filename = '%s.py' % name + # NOTE: if you have an .egg globally installed with an older + # version of cffi, this will not work, because sys.path ends + # up with the .egg before the PYTHONPATH entries. I didn't + # find a solution to that: we can hack sys.path inside the + # script run here, but we can't hack it in the same way in + # execute(). env = os.environ.copy() env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) self._run([sys.executable, os.path.join(local_dir, filename)], @@ -33,9 +39,22 @@ path = self.get_path() filename = '%s.c' % name shutil.copy(os.path.join(local_dir, filename), path) - self._run(['gcc', '-g', filename, '-o', name, '-L.'] + - ['%s.so' % modname for modname in modules] + - ['-lpython2.7', '-Wl,-rpath=$ORIGIN/'] + extra) + if '__pypy__' in sys.builtin_module_names: + # xxx a bit hackish, maybe ffi.compile() should do a better job + executable = os.path.abspath(sys.executable) + libpypy_c = os.path.join(os.path.dirname(executable), + 'libpypy-c.so') + try: + os.symlink(libpypy_c, os.path.join(path, 'libpypy-c.so')) + except OSError: + pass + self._run(['gcc', '-g', filename, '-o', name, '-L.'] + + ['%s.pypy-26.so' % modname for modname in modules] + + ['-lpypy-c', '-Wl,-rpath=$ORIGIN/'] + extra) + else: + self._run(['gcc', '-g', filename, '-o', name, '-L.'] + + ['%s.so' % modname for modname in modules] + + ['-lpython2.7', '-Wl,-rpath=$ORIGIN/'] + extra) def execute(self, name): path = self.get_path() From pypy.commits at gmail.com Sat Jan 2 03:46:30 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 00:46:30 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: We need fflush(stdout) here, for a possibly bad reason: cpython 2.x Message-ID: <56878e66.c4b1c20a.b3ce9.ffffb668@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2515:cb13da0c37ca Date: 2016-01-02 08:51 +0000 http://bitbucket.org/cffi/cffi/changeset/cb13da0c37ca/ Log: We need fflush(stdout) here, for a possibly bad reason: cpython 2.x writes to stdout, so it gets in the correct order, but pypy (and likely cpython 3.x) writes directly to the file descriptor diff --git a/testing/embedding/add_recursive-test.c b/testing/embedding/add_recursive-test.c --- a/testing/embedding/add_recursive-test.c +++ b/testing/embedding/add_recursive-test.c @@ -6,6 +6,7 @@ static int some_callback(int x) { printf("some_callback(%d)\n", x); + fflush(stdout); return add_rec(x, 9); } diff --git a/testing/embedding/thread1-test.c b/testing/embedding/thread1-test.c --- a/testing/embedding/thread1-test.c +++ b/testing/embedding/thread1-test.c @@ -30,6 +30,7 @@ assert(status == 0); printf("starting\n"); + fflush(stdout); for (i = 0; i < NTHREADS; i++) { status = pthread_create(&th, NULL, start_routine, NULL); assert(status == 0); diff --git a/testing/embedding/thread2-test.c b/testing/embedding/thread2-test.c --- a/testing/embedding/thread2-test.c +++ b/testing/embedding/thread2-test.c @@ -43,6 +43,7 @@ assert(status == 0); printf("starting\n"); + fflush(stdout); status = pthread_create(&th, NULL, start_routine_1, NULL); assert(status == 0); status = pthread_create(&th, NULL, start_routine_2, NULL); diff --git a/testing/embedding/thread3-test.c b/testing/embedding/thread3-test.c --- a/testing/embedding/thread3-test.c +++ b/testing/embedding/thread3-test.c @@ -40,6 +40,7 @@ assert(status == 0); printf("starting\n"); + fflush(stdout); for (i = 0; i < 10; i++) { status = pthread_create(&th, NULL, start_routine_2, NULL); assert(status == 0); From pypy.commits at gmail.com Sat Jan 2 04:43:04 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 02 Jan 2016 01:43:04 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: did not think of the following scenario: in a bridge you cannot 'just' use volatile registers as some might be allocated in the trace it is exiting (fixed) Message-ID: <56879ba8.82df1c0a.79914.ffffada2@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81521:8184205817a7 Date: 2016-01-02 10:42 +0100 http://bitbucket.org/pypy/pypy/changeset/8184205817a7/ Log: did not think of the following scenario: in a bridge you cannot 'just' use volatile registers as some might be allocated in the trace it is exiting (fixed) added edge case to gc_load which I have added to gc_load_indexed, but not gc_load diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -300,14 +300,14 @@ self.mc = mc # signature of this _frame_realloc_slowpath function: - # * on entry, r3 is the new size - # * on entry, r2 is the gcmap + # * on entry, r0 is the new size + # * on entry, r1 is the gcmap # * no managed register must be modified ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.STG(r.r2, l.addr(ofs2, r.SPP)) + mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) - self._push_core_regs_to_jitframe(mc) + self._push_core_regs_to_jitframe(mc, r.MANAGED_REGS) self._push_fp_regs_to_jitframe(mc) self.mc.store_link() @@ -317,6 +317,7 @@ # no need to move second argument (frame_depth), # it is already in register r3! + mc.LGR(r.r3, r.SCRATCH2) RCS2 = r.r10 RCS3 = r.r12 @@ -343,8 +344,7 @@ mc.store(r.r3.value, r.r5.value, -WORD) mc.restore_link() - # do not restore r2, thus [1:] - self._pop_core_regs_from_jitframe(mc, r.MANAGED_REGS[1:]) + self._pop_core_regs_from_jitframe(mc) self._pop_fp_regs_from_jitframe(mc) mc.BCR(c.ANY, r.RETURN) @@ -492,17 +492,17 @@ """ descrs = self.cpu.gc_ll_descr.getframedescrs(self.cpu) ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr) - mc.LG(r.r2, l.addr(ofs, r.SPP)) + mc.LG(r.SCRATCH2, l.addr(ofs, r.SPP)) patch_pos = mc.currpos() # placeholder for the following instructions - # CGFI r2, ... (6 bytes) + # CGFI r1, ... (6 bytes) # BRC c, ... (4 bytes) - # LGHI r3, ... (4 bytes) + # LGHI r0, ... (4 bytes) # sum -> (14 bytes) mc.write('\x00'*14) self.mc.push_std_frame() mc.load_imm(r.RETURN, self._frame_realloc_slowpath) - self.load_gcmap(mc, r.r2, gcmap) + self.load_gcmap(mc, r.SCRATCH, gcmap) mc.raw_call() self.mc.pop_std_frame() @@ -934,6 +934,8 @@ self.jmpto(r.r14) def _push_all_regs_to_stack(self, mc, withfloats, callee_only=False): + # not used!! + # XXX remove if not needed base_ofs = 2*WORD if callee_only: regs = ZARCHRegisterManager.save_around_call_regs diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -807,7 +807,11 @@ def _emit_gc_load(self, op, arglocs, regalloc): result_loc, base_loc, ofs_loc, size_loc, sign_loc = arglocs - src_addr = l.addr(0, base_loc, ofs_loc) + if ofs_loc.is_imm(): + assert self._mem_offset_supported(ofs_loc.value) + src_addr = l.addr(ofs_loc.value, base_loc) + else: + src_addr = l.addr(0, base_loc, ofs_loc) self._memory_read(result_loc, src_addr, size_loc.value, sign_loc.value) emit_gc_load_i = _emit_gc_load From pypy.commits at gmail.com Sat Jan 2 04:48:52 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 02 Jan 2016 01:48:52 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added some sanity checks, s390x only fails 4 (in test_runner) Message-ID: <56879d04.d4811c0a.6ecfa.18d3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81522:6d2f6b85c6e0 Date: 2016-01-02 10:48 +0100 http://bitbucket.org/pypy/pypy/changeset/6d2f6b85c6e0/ Log: added some sanity checks, s390x only fails 4 (in test_runner) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -807,6 +807,7 @@ def _emit_gc_load(self, op, arglocs, regalloc): result_loc, base_loc, ofs_loc, size_loc, sign_loc = arglocs + assert not ofs_loc.is_in_pool() if ofs_loc.is_imm(): assert self._mem_offset_supported(ofs_loc.value) src_addr = l.addr(ofs_loc.value, base_loc) @@ -820,6 +821,7 @@ def _emit_gc_load_indexed(self, op, arglocs, regalloc): result_loc, base_loc, index_loc, offset_loc, size_loc, sign_loc =arglocs + assert not offset_loc.is_in_pool() if offset_loc.is_imm() and self._mem_offset_supported(offset_loc.value): addr_loc = l.addr(offset_loc.value, base_loc, index_loc) else: @@ -834,6 +836,7 @@ def emit_gc_store(self, op, arglocs, regalloc): (base_loc, index_loc, value_loc, size_loc) = arglocs + assert not index_loc.is_in_pool() if index_loc.is_imm() and self._mem_offset_supported(index_loc.value): addr_loc = l.addr(index_loc.value, base_loc) else: From pypy.commits at gmail.com Sat Jan 2 04:56:37 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 02 Jan 2016 01:56:37 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: asmlen test failed, because more instructions in the entry of a bridge are now compiled (realloc check), seems about right what other backends implement! Message-ID: <56879ed5.89dec20a.7b97c.ffffc3ca@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81523:72ff734c842f Date: 2016-01-02 10:55 +0100 http://bitbucket.org/pypy/pypy/changeset/72ff734c842f/ Log: asmlen test failed, because more instructions in the entry of a bridge are now compiled (realloc check), seems about right what other backends implement! diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -24,6 +24,6 @@ cpu.setup_once() return cpu - # TODO verify: the lgr might be redundant! add_loop_instructions = "lg; lgr; larl; agr; cgfi; je; j;$" - bridge_loop_instructions = ("larl; lg; br;") + bridge_loop_instructions = "larl; lg; cgfi; je; lghi; stg; " \ + "lay; lgfi; lgfi; basr; lay; lg; br;$" From pypy.commits at gmail.com Sat Jan 2 05:18:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 02:18:50 -0800 (PST) Subject: [pypy-commit] pypy default: Fix: we need to "acquire/release the GIL" even around RPython_Startup(), like entrypoint.c does Message-ID: <5687a40a.2815c20a.d8200.3227@mx.google.com> Author: Armin Rigo Branch: Changeset: r81524:2b962d433084 Date: 2016-01-02 10:22 +0000 http://bitbucket.org/pypy/pypy/changeset/2b962d433084/ Log: Fix: we need to "acquire/release the GIL" even around RPython_Startup(), like entrypoint.c does diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -120,9 +120,6 @@ _nowrapper=True, random_effects_on_gcobjs=True) - at entrypoint_lowlevel('main', [], c_name='rpython_startup_code') + at entrypoint_highlevel('main', [], c_name='rpython_startup_code') def rpython_startup_code(): - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py RPython_StartupCode() - rffi.stackcounter.stacks_counter -= 1 From pypy.commits at gmail.com Sat Jan 2 05:33:43 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 02:33:43 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: Fixes. Yay! The cffi tests (after translation) pass. Message-ID: <5687a787.c6ecc20a.ff6b1.ffffc788@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81525:db64914878b9 Date: 2016-01-02 10:37 +0000 http://bitbucket.org/pypy/pypy/changeset/db64914878b9/ Log: Fixes. Yay! The cffi tests (after translation) pass. diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -54,7 +54,9 @@ name = rffi.charp2str(init_struct.name) # space = glob.space + must_leave = False try: + must_leave = space.threadlocals.try_enter_thread(space) load_embedded_cffi_module(space, version, init_struct) res = 0 except OperationError, operr: @@ -67,6 +69,8 @@ sys.stderr.write('sys.path: %r\n' % (sys.path,)) """) res = -1 + if must_leave: + space.threadlocals.leave_thread(space) except Exception, e: # oups! last-level attempt to recover. try: @@ -110,7 +114,6 @@ rpython_startup_code(); RPyGilAllocate(); - RPyGilRelease(); if (dladdr(&_cffi_init, &info) == 0) { _cffi_init_error("dladdr() failed: ", dlerror()); diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -120,9 +120,6 @@ _nowrapper=True, random_effects_on_gcobjs=True) - at entrypoint_lowlevel('main', [], c_name='rpython_startup_code') + at entrypoint_highlevel('main', [], c_name='rpython_startup_code') def rpython_startup_code(): - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py RPython_StartupCode() - rffi.stackcounter.stacks_counter -= 1 From pypy.commits at gmail.com Sat Jan 2 05:57:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 02 Jan 2016 02:57:31 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: exchanged AGHI with LAY, same effect, but LAY does not change the condition code which is needed by the write barrier helper! Message-ID: <5687ad1b.82df1c0a.79914.ffffc14d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81526:b2f6253a910d Date: 2016-01-02 11:56 +0100 http://bitbucket.org/pypy/pypy/changeset/b2f6253a910d/ Log: exchanged AGHI with LAY, same effect, but LAY does not change the condition code which is needed by the write barrier helper! diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -170,7 +170,7 @@ self.mc = mc # save the information - mc.STG(r.r14, l.addr(14*WORD, r.SP)) # save the link + mc.store_link() RCS2 = r.r10 RCS3 = r.r12 @@ -240,7 +240,7 @@ if withcards: # A final andix before the blr, for the caller. Careful to # not follow this instruction with another one that changes - # the status of cr0! + # the status of the condition code card_marking_mask = descr.jit_wb_cards_set_singlebyte mc.LLGC(RCS2, l.addr(descr.jit_wb_if_flag_byteofs, RCS2)) mc.NILL(RCS2, l.imm(card_marking_mask & 0xFF)) @@ -253,7 +253,7 @@ self._pop_core_regs_from_jitframe(mc, saved_regs) self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) - mc.LG(r.RETURN, l.addr(14*WORD, r.SP)) # restore the link + mc.restore_link() mc.BCR(c.ANY, r.RETURN) self.mc = old_mc @@ -935,7 +935,7 @@ def _push_all_regs_to_stack(self, mc, withfloats, callee_only=False): # not used!! - # XXX remove if not needed + # TODO remove if not needed base_ofs = 2*WORD if callee_only: regs = ZARCHRegisterManager.save_around_call_regs diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -454,10 +454,10 @@ mc.load_imm(r.r14, self.wb_slowpath[helper_num]) # alloc a stack frame - mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) + mc.push_std_frame() mc.BASR(r.r14, r.r14) # destory the frame - mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) + mc.pop_std_frame() if card_marking_mask: # The helper ends again with a check of the flag in the object. @@ -480,26 +480,28 @@ tmp_loc = arglocs[2] n = descr.jit_wb_card_page_shift + assert tmp_loc is not r.SCRATCH + assert tmp_loc is not r.SCRATCH2 + # compute in tmp_loc the byte offset: - # ~(index >> (card_page_shift + 3)) ('~' is 'not_' below) + # ~(index >> (card_page_shift + 3)) mc.SRAG(tmp_loc, loc_index, l.addr(n+3)) - #mc.srli_op(tmp_loc.value, loc_index.value, n + 3) - # invert the bits + + # compute in SCRATCH the index of the bit inside the byte: + # (index >> card_page_shift) & 7 + # not supported on the development s390x :(, extension is not installed + # 0x80 sets zero flag. will store 0 into all selected bits + # mc.RISBGN(r.SCRATCH, loc_index, l.imm(3), l.imm(0x80 | 63), l.imm(61)) + mc.SRAG(r.SCRATCH, loc_index, l.addr(n)) + mc.NILL(r.SCRATCH, l.imm(0x7)) + + # invert the bits of tmp_loc mc.XIHF(tmp_loc, l.imm(0xffffFFFF)) mc.XILF(tmp_loc, l.imm(0xffffFFFF)) - # compute in r2 the index of the bit inside the byte: - # (index >> card_page_shift) & 7 - # 0x80 sets zero flag. will store 0 into all selected bits - # cannot be used on the VM - # mc.RISBGN(r.SCRATCH, loc_index, l.imm(3), l.imm(0x80 | 63), l.imm(61)) - mc.SLAG(r.SCRATCH, loc_index, l.addr(3)) - mc.NILL(r.SCRATCH, l.imm(0xff)) - #mc.rldicl(r.SCRATCH2.value, loc_index.value, 64 - n, 61) - - # set r2 to 1 << r2 + # set SCRATCH to 1 << r2 mc.LGHI(r.SCRATCH2, l.imm(1)) - mc.SLAG(r.SCRATCH, r.SCRATCH2, l.addr(0,r.SCRATCH)) + mc.SLAG(r.SCRATCH2, r.SCRATCH2, l.addr(0,r.SCRATCH)) # set this bit inside the byte of interest addr = l.addr(0, loc_base, tmp_loc) @@ -507,7 +509,6 @@ mc.OGR(r.SCRATCH, r.SCRATCH2) mc.STCY(r.SCRATCH, addr) # done - else: byte_index = loc_index.value >> descr.jit_wb_card_page_shift byte_ofs = ~(byte_index >> 3) From pypy.commits at gmail.com Sat Jan 2 09:33:22 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 06:33:22 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Pseudo-tests that print some performance numbers for calling an embedded Message-ID: <5687dfb2.11301c0a.b4ee7.066c@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2516:63c49bc07ecc Date: 2016-01-02 15:33 +0100 http://bitbucket.org/cffi/cffi/changeset/63c49bc07ecc/ Log: Pseudo-tests that print some performance numbers for calling an embedded "extern Python" function in a loop, with or without threads. diff --git a/testing/embedding/perf-test.c b/testing/embedding/perf-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/perf-test.c @@ -0,0 +1,86 @@ +#include +#include +#include +#ifdef PTEST_USE_THREAD +# include +# include +static sem_t done; +#endif + + +extern int add1(int, int); + + +static double time_delta(struct timeval *stop, struct timeval *start) +{ + return (stop->tv_sec - start->tv_sec) + + 1e-6 * (stop->tv_usec - start->tv_usec); +} + +static double measure(void) +{ + long long i, iterations; + int result; + struct timeval start, stop; + double elapsed; + + add1(0, 0); /* prepare off-line */ + + i = 0; + iterations = 1000; + result = gettimeofday(&start, NULL); + assert(result == 0); + + while (1) { + for (; i < iterations; i++) { + add1(((int)i) & 0xaaaaaa, ((int)i) & 0x555555); + } + result = gettimeofday(&stop, NULL); + assert(result == 0); + + elapsed = time_delta(&stop, &start); + assert(elapsed >= 0.0); + if (elapsed > 2.5) + break; + iterations = iterations * 3 / 2; + } + + return elapsed / (double)iterations; +} + +static void *start_routine(void *arg) +{ + double t = measure(); + printf("time per call: %.3g\n", t); + +#ifdef PTEST_USE_THREAD + int status = sem_post(&done); + assert(status == 0); +#endif + + return arg; +} + + +int main(void) +{ +#ifndef PTEST_USE_THREAD + start_routine(0); +#else + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + add1(0, 0); /* this is the main thread */ + + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = sem_wait(&done); + assert(status == 0); + } +#endif + return 0; +} diff --git a/testing/embedding/perf.py b/testing/embedding/perf.py new file mode 100644 --- /dev/null +++ b/testing/embedding/perf.py @@ -0,0 +1,18 @@ +import cffi + +ffi = cffi.FFI() + +ffi.cdef(""" + extern "Python" int add1(int, int); +""", dllexport=True) + +ffi.embedding_init_code(r""" + @ffi.def_extern() + def add1(x, y): + return x + y +""") + +ffi.set_source("_perf_cffi", """ +""") + +ffi.compile(verbose=True) diff --git a/testing/embedding/test_performance.py b/testing/embedding/test_performance.py new file mode 100644 --- /dev/null +++ b/testing/embedding/test_performance.py @@ -0,0 +1,47 @@ +from testing.embedding.test_basic import EmbeddingTests + + +class TestPerformance(EmbeddingTests): + def test_perf_single_threaded(self): + self.prepare_module('perf') + self.compile('perf-test', ['_perf_cffi'], ['-O2']) + output = self.execute('perf-test') + print '='*79 + print output.rstrip() + print '='*79 + + def test_perf_in_1_thread(self): + self.prepare_module('perf') + self.compile('perf-test', ['_perf_cffi'], + ['-pthread', '-O2', '-DPTEST_USE_THREAD=1']) + output = self.execute('perf-test') + print '='*79 + print output.rstrip() + print '='*79 + + def test_perf_in_2_threads(self): + self.prepare_module('perf') + self.compile('perf-test', ['_perf_cffi'], + ['-pthread', '-O2', '-DPTEST_USE_THREAD=2']) + output = self.execute('perf-test') + print '='*79 + print output.rstrip() + print '='*79 + + def test_perf_in_4_threads(self): + self.prepare_module('perf') + self.compile('perf-test', ['_perf_cffi'], + ['-pthread', '-O2', '-DPTEST_USE_THREAD=4']) + output = self.execute('perf-test') + print '='*79 + print output.rstrip() + print '='*79 + + def test_perf_in_8_threads(self): + self.prepare_module('perf') + self.compile('perf-test', ['_perf_cffi'], + ['-pthread', '-O2', '-DPTEST_USE_THREAD=8']) + output = self.execute('perf-test') + print '='*79 + print output.rstrip() + print '='*79 From pypy.commits at gmail.com Sat Jan 2 09:55:55 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 06:55:55 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: A test checking that thread-local values are saved, even though Message-ID: <5687e4fb.e251c20a.14fea.12ed@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2517:652f66e41c7b Date: 2016-01-02 15:55 +0100 http://bitbucket.org/cffi/cffi/changeset/652f66e41c7b/ Log: A test checking that thread-local values are saved, even though there is no underlying official Python thread diff --git a/testing/embedding/test_tlocal.py b/testing/embedding/test_tlocal.py new file mode 100644 --- /dev/null +++ b/testing/embedding/test_tlocal.py @@ -0,0 +1,10 @@ +from testing.embedding.test_basic import EmbeddingTests + + +class TestThreadLocal(EmbeddingTests): + def test_thread_local(self): + self.prepare_module('tlocal') + self.compile('tlocal-test', ['_tlocal_cffi'], ['-pthread']) + for i in range(50): + output = self.execute('tlocal-test') + assert output == "done\n" diff --git a/testing/embedding/thread1-test.c b/testing/embedding/thread1-test.c --- a/testing/embedding/thread1-test.c +++ b/testing/embedding/thread1-test.c @@ -13,7 +13,7 @@ static void *start_routine(void *arg) { - int x, y, status; + int x, status; x = add1(40, 2); assert(x == 42); diff --git a/testing/embedding/tlocal-test.c b/testing/embedding/tlocal-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/tlocal-test.c @@ -0,0 +1,48 @@ +#include +#include +#include +#include + +#define NTHREADS 10 + + +extern int add1(int, int); + +static sem_t done; + + +static void *start_routine(void *arg) +{ + int i, x, expected, status; + + expected = add1(40, 2); + assert((expected % 1000) == 42); + + for (i=0; i<10; i++) { + x = add1(40, 2); + assert(x == expected); + } + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + for (i = 0; i < NTHREADS; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < NTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff --git a/testing/embedding/tlocal.py b/testing/embedding/tlocal.py new file mode 100644 --- /dev/null +++ b/testing/embedding/tlocal.py @@ -0,0 +1,26 @@ +import cffi + +ffi = cffi.FFI() + +ffi.cdef(""" + extern "Python" int add1(int, int); +""", dllexport=True) + +ffi.embedding_init_code(r""" + import thread, itertools + tloc = thread._local() + g_seen = itertools.count() + + @ffi.def_extern() + def add1(x, y): + try: + num = tloc.num + except AttributeError: + num = tloc.num = g_seen.next() * 1000 + return x + y + num +""") + +ffi.set_source("_tlocal_cffi", """ +""") + +ffi.compile(verbose=True) From pypy.commits at gmail.com Sat Jan 2 13:22:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 10:22:34 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Draft blog post Message-ID: <5688156a.82df1c0a.79914.47b1@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5576:4e6f2b0b4afb Date: 2016-01-02 19:22 +0100 http://bitbucket.org/pypy/extradoc/changeset/4e6f2b0b4afb/ Log: Draft blog post diff --git a/blog/draft/cffi-embedding.rst b/blog/draft/cffi-embedding.rst new file mode 100644 --- /dev/null +++ b/blog/draft/cffi-embedding.rst @@ -0,0 +1,81 @@ +======================== +Using CFFI for embedding +======================== + +CFFI_ has been a great success so far to call C libraries in your +Python programs, in a way that is both simple and that works across +CPython 2.x and 3.x and PyPy. + +We are now adding support for *embedding* Python inside non-Python +programs. This is traditionally done using the CPython C API: from C +code, you call ``Py_Initialize()`` and then some other functions like +``PyRun_SimpleString()``. In the simple cases it is, indeed, simple +enough; but it can become a more complicated story if you throw in +supporting application-dependent object types, and correctly running +on multiple threads, and so on. + +Moreover, this approach is specific to CPython (2.x or 3.x, which you +can do in a similar way). It does not work on PyPy, which has its own +smaller `embedding API`_. + +CFFI now supports embedding directly---and there is no fixed API at +all. The idea is to write some Python script with a ``cdef()`` which +declares a number of ``extern "Python"`` functions. When running the +script, it creates the C source code and compile it to a +dynamically-linked library (``.so`` on Linux). This is the same as in +the regular API-mode usage, and ``extern "Python"`` was `introduced in +CFFI 1.4`_. What is new is that you also give a bit of +initialization-time Python code directly in the script, which will be +compiled into the ``.so`` too---and the ``extern "Python"`` are now +also "exported" from the dynamically-linked library as regular C +functions. + +In other words, this library can now be used directly from any C +program (and it is still importable in Python). It exposes the C API +of your choice, which you specified with the ``extern "Python"`` +declarations. You can use it to make whatever custom API makes sense +in your particular case. You can even make directly a "plug-in" for +any program that supports them, just by exporting the API expected for +such plugins. + +This is still being finalized, but please try it out. (You can also +see `embedding.py`_ directly online here for a quick glance.) + +* get the branch ``static-callback-embedding`` of CFFI (``hg clone https://bitbucket.org/cffi/cffi && hg up static-callback-embedding``) + +* make the ``_cffi_backend.so`` (``python setup_base.py build_ext -f -i``) + +* run ``embedding`` in the ``demo`` directory (``cd demo; PYTHONPATH=.. python embedding.py``) + +* run ``gcc`` to build the C sources (``gcc -shared -fPIC _embedding_cffi.c -o _embedding_cffi.so -lpython2.7 -I/usr/include/python2.7``) + +* try out the demo C program in ``embedding_test.c`` (``gcc embedding_test.c _embedding_cffi.so && PYTHONPATH=.. LD_LIBRARY_PATH=. a.out``). + +Note that if you get ``ImportError: cffi extension module +'_embedding_cffi' has unknown version 0x2701``, it means that the +``_cffi_backend`` module loaded is a pre-installed one instead of the +more recent one in ``..``. Be sure to use ``PYTHONPATH`` for now. + +Very similar steps can be followed on PyPy, but it requires the +``cffi-static-callback-embedding`` branch of PyPy, which you must +first translate from sources. + +You get a CPython/PyPy that is automatically initialized (using locks +in case of multi-threading) the first time any of the ``extern +"Python"`` functions is called from the C program. The custom +initialization-time Python code is run at that time too. If this code +starts to be big, you may consider moving it to several modules or +packages and importing them from the initialization-time Python code; +in that case you have to be careful about setting up the correct +``sys.path``. + +Note that right now it does not support CPython's notion of multiple +subinterpreters. The logic creates a single global Python interpreter, +and everything is run in that context. Idea about how to support that +cleanly would be welcome ``:-)`` More generally, any feedback is +appreciated. + + +Have fun, + +Armin From pypy.commits at gmail.com Sat Jan 2 13:37:16 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 10:37:16 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: updates Message-ID: <568818dc.83e01c0a.604e0.304d@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2518:59062d93c601 Date: 2016-01-02 19:37 +0100 http://bitbucket.org/cffi/cffi/changeset/59062d93c601/ Log: updates diff --git a/demo/embedding.py b/demo/embedding.py --- a/demo/embedding.py +++ b/demo/embedding.py @@ -7,9 +7,7 @@ """, dllexport=True) ffi.embedding_init_code(""" - print "preparing" - - intern("foo") + print "preparing" # printed once @ffi.def_extern() def add(x, y): @@ -20,4 +18,9 @@ ffi.set_source("_embedding_cffi", """ """) -ffi.compile() +#ffi.compile() -- should be fixed to do the right thing + +ffi.emit_c_code('_embedding_cffi.c') +# then call the compiler manually with the proper options, like: +# gcc -shared -fPIC _embedding_cffi.c -o _embedding_cffi.so -lpython2.7 +# -I/usr/include/python2.7 diff --git a/demo/embedding_test.c b/demo/embedding_test.c --- a/demo/embedding_test.c +++ b/demo/embedding_test.c @@ -1,7 +1,7 @@ /* Link this program with libembedding_test.so. E.g. with gcc: - gcc -o embedding_test embedding_test.c _embedding_cffi.so -lpython2.7 + gcc -o embedding_test embedding_test.c _embedding_cffi.so */ #include diff --git a/testing/embedding/test_tlocal.py b/testing/embedding/test_tlocal.py --- a/testing/embedding/test_tlocal.py +++ b/testing/embedding/test_tlocal.py @@ -5,6 +5,6 @@ def test_thread_local(self): self.prepare_module('tlocal') self.compile('tlocal-test', ['_tlocal_cffi'], ['-pthread']) - for i in range(50): + for i in range(10): output = self.execute('tlocal-test') assert output == "done\n" From pypy.commits at gmail.com Sat Jan 2 13:37:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 10:37:20 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: updates Message-ID: <568818e0.2968c20a.6d969.ffffb843@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5577:478b903db2f9 Date: 2016-01-02 19:37 +0100 http://bitbucket.org/pypy/extradoc/changeset/478b903db2f9/ Log: updates diff --git a/blog/draft/cffi-embedding.rst b/blog/draft/cffi-embedding.rst --- a/blog/draft/cffi-embedding.rst +++ b/blog/draft/cffi-embedding.rst @@ -18,62 +18,80 @@ can do in a similar way). It does not work on PyPy, which has its own smaller `embedding API`_. -CFFI now supports embedding directly---and there is no fixed API at -all. The idea is to write some Python script with a ``cdef()`` which -declares a number of ``extern "Python"`` functions. When running the -script, it creates the C source code and compile it to a -dynamically-linked library (``.so`` on Linux). This is the same as in -the regular API-mode usage, and ``extern "Python"`` was `introduced in -CFFI 1.4`_. What is new is that you also give a bit of -initialization-time Python code directly in the script, which will be -compiled into the ``.so`` too---and the ``extern "Python"`` are now -also "exported" from the dynamically-linked library as regular C -functions. +The new-and-coming thing about CFFI, meant as replacement of the above +solutions, is direct embedding support---and it does that with no +fixed API at all. The idea is to write some Python script with a +``cdef()`` which declares a number of ``extern "Python"`` functions. +When running the script, it creates the C source code and compiles it +to a dynamically-linked library (``.so`` on Linux). This is the same +as in the regular API-mode usage, and ``extern "Python"`` was +`introduced in CFFI 1.4`_. What is new is that these ``extern +"Python"`` can now also be *exported* from the ``.so``, in the C +sense. You also give a bit of initialization-time Python code +directly in the script, which will be compiled into the ``.so`` +too. In other words, this library can now be used directly from any C program (and it is still importable in Python). It exposes the C API of your choice, which you specified with the ``extern "Python"`` declarations. You can use it to make whatever custom API makes sense -in your particular case. You can even make directly a "plug-in" for +in your particular case. You can even directly make a "plug-in" for any program that supports them, just by exporting the API expected for such plugins. This is still being finalized, but please try it out. (You can also -see `embedding.py`_ directly online here for a quick glance.) +see `embedding.py`_ directly online for a quick glance.) These are +the instructions on Linux with CPython 2.7:: -* get the branch ``static-callback-embedding`` of CFFI (``hg clone https://bitbucket.org/cffi/cffi && hg up static-callback-embedding``) +* get the branch ``static-callback-embedding`` of CFFI:: -* make the ``_cffi_backend.so`` (``python setup_base.py build_ext -f -i``) + hg clone https://bitbucket.org/cffi/cffi + hg up static-callback-embedding -* run ``embedding`` in the ``demo`` directory (``cd demo; PYTHONPATH=.. python embedding.py``) +* make the ``_cffi_backend.so``:: -* run ``gcc`` to build the C sources (``gcc -shared -fPIC _embedding_cffi.c -o _embedding_cffi.so -lpython2.7 -I/usr/include/python2.7``) + python setup_base.py build_ext -f -i -* try out the demo C program in ``embedding_test.c`` (``gcc embedding_test.c _embedding_cffi.so && PYTHONPATH=.. LD_LIBRARY_PATH=. a.out``). +* run ``embedding.py`` in the ``demo`` directory:: + + cd demo + PYTHONPATH=.. python embedding.py + +* run ``gcc`` to build the C sources---on Linux:: + + gcc -shared -fPIC _embedding_cffi.c -o _embedding_cffi.so -lpython2.7 -I/usr/include/python2.7 + +* try out the demo C program in ``embedding_test.c``:: + + gcc embedding_test.c _embedding_cffi.so + PYTHONPATH=.. LD_LIBRARY_PATH=. a.out Note that if you get ``ImportError: cffi extension module '_embedding_cffi' has unknown version 0x2701``, it means that the ``_cffi_backend`` module loaded is a pre-installed one instead of the -more recent one in ``..``. Be sure to use ``PYTHONPATH`` for now. +more recent one in ``..``. Be sure to use ``PYTHONPATH=..`` for now. Very similar steps can be followed on PyPy, but it requires the ``cffi-static-callback-embedding`` branch of PyPy, which you must first translate from sources. -You get a CPython/PyPy that is automatically initialized (using locks -in case of multi-threading) the first time any of the ``extern -"Python"`` functions is called from the C program. The custom -initialization-time Python code is run at that time too. If this code -starts to be big, you may consider moving it to several modules or -packages and importing them from the initialization-time Python code; -in that case you have to be careful about setting up the correct -``sys.path``. +CPython 3.x and non-Linux platforms are still a work in progress right +now, but this should be quickly fixed. -Note that right now it does not support CPython's notion of multiple -subinterpreters. The logic creates a single global Python interpreter, -and everything is run in that context. Idea about how to support that -cleanly would be welcome ``:-)`` More generally, any feedback is -appreciated. +Note that CPython/PyPy is automatically initialized (using locks in +case of multi-threading) the first time any of the ``extern "Python"`` +functions is called from the C program. At that time, the custom +initialization-time Python code you put in +``ffi.embedding_init_code()`` is executed. If this code starts to be +big, you may consider moving it to independent modules or packages; +then the initialization-time Python code only needs to import them +(possibly after hacking around with ``sys.path``). + +Another point: right now this does not support CPython's notion of +multiple subinterpreters. The logic creates a single global Python +interpreter, and runs everything in that context. Idea about how to +support that cleanly would be welcome ``:-)`` More generally, any +feedback is appreciated. Have fun, From pypy.commits at gmail.com Sat Jan 2 13:41:36 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 10:41:36 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: tweaks Message-ID: <568819e0.e686c20a.322f2.437d@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5578:5f72fa7e28b4 Date: 2016-01-02 19:41 +0100 http://bitbucket.org/pypy/extradoc/changeset/5f72fa7e28b4/ Log: tweaks diff --git a/blog/draft/cffi-embedding.rst b/blog/draft/cffi-embedding.rst --- a/blog/draft/cffi-embedding.rst +++ b/blog/draft/cffi-embedding.rst @@ -16,7 +16,7 @@ Moreover, this approach is specific to CPython (2.x or 3.x, which you can do in a similar way). It does not work on PyPy, which has its own -smaller `embedding API`_. +smaller but very different `embedding API`_. The new-and-coming thing about CFFI, meant as replacement of the above solutions, is direct embedding support---and it does that with no @@ -39,9 +39,11 @@ any program that supports them, just by exporting the API expected for such plugins. -This is still being finalized, but please try it out. (You can also -see `embedding.py`_ directly online for a quick glance.) These are -the instructions on Linux with CPython 2.7:: +This is still being finalized, but please try it out. (You can also see +`embedding.py`_ directly online for a quick glance.) These are the +instructions on Linux with CPython 2.7 (CPython 3.x and non-Linux +platforms are still a work in progress right now, but this should be +quickly fixed): * get the branch ``static-callback-embedding`` of CFFI:: @@ -75,9 +77,6 @@ ``cffi-static-callback-embedding`` branch of PyPy, which you must first translate from sources. -CPython 3.x and non-Linux platforms are still a work in progress right -now, but this should be quickly fixed. - Note that CPython/PyPy is automatically initialized (using locks in case of multi-threading) the first time any of the ``extern "Python"`` functions is called from the C program. At that time, the custom From pypy.commits at gmail.com Sat Jan 2 13:47:00 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 10:47:00 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56881b24.c1bb1c0a.a55b1.40ad@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r687:3a738e4c8bd7 Date: 2016-01-02 19:46 +0100 http://bitbucket.org/pypy/pypy.org/changeset/3a738e4c8bd7/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $61658 of $105000 (58.7%) + $62631 of $105000 (59.6%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Sat Jan 2 20:10:54 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 17:10:54 -0800 (PST) Subject: [pypy-commit] pypy default: Fix the test. _PyLong_FromByteArray() always produces nonsense (i.e. a Message-ID: <5688751e.d4811c0a.6ecfa.232f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81527:e205bcf52d2f Date: 2016-01-03 02:10 +0100 http://bitbucket.org/pypy/pypy/changeset/e205bcf52d2f/ Log: Fix the test. _PyLong_FromByteArray() always produces nonsense (i.e. a different result than CPython), which needs to be fixed. diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -175,10 +175,10 @@ little_endian, is_signed); """), ]) - assert module.from_bytearray(True, False) == 0x9ABC - assert module.from_bytearray(True, True) == -0x6543 - assert module.from_bytearray(False, False) == 0xBC9A - assert module.from_bytearray(False, True) == -0x4365 + assert module.from_bytearray(True, False) == 0xBC9A + assert module.from_bytearray(True, True) == -0x4366 + assert module.from_bytearray(False, False) == 0x9ABC + assert module.from_bytearray(False, True) == -0x6544 def test_fromunicode(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Sat Jan 2 20:16:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 17:16:20 -0800 (PST) Subject: [pypy-commit] pypy default: Fix _PyLong_FromByteArray() for e205bcf52d2f Message-ID: <56887664.a8abc20a.d640c.ffffad3f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81528:c626616a34fa Date: 2016-01-03 02:15 +0100 http://bitbucket.org/pypy/pypy/changeset/c626616a34fa/ Log: Fix _PyLong_FromByteArray() for e205bcf52d2f diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -5,7 +5,7 @@ from pypy.objspace.std.longobject import W_LongObject from pypy.interpreter.error import OperationError from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask -from rpython.rlib.rbigint import rbigint +from rpython.rlib.rbigint import rbigint, NULLRBIGINT, ONERBIGINT from rpython.rlib.rarithmetic import intmask @@ -229,25 +229,21 @@ little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - result = rbigint() - negative = False + # xxx not the most efficient implementation possible, but should work + result = NULLRBIGINT + c = 0 for i in range(0, n): if little_endian: + c = intmask(bytes[n - i - 1]) + else: c = intmask(bytes[i]) - else: - c = intmask(bytes[n - i - 1]) - if i == 0 and signed and c & 0x80: - negative = True - if negative: - c = c ^ 0xFF digit = rbigint.fromint(c) result = result.lshift(8) result = result.add(digit) - if negative: - result = result.neg() + if signed and c >= 0x80: + result = result.sub(ONERBIGINT.lshift(8 * n)) return space.newlong_from_rbigint(result) - From pypy.commits at gmail.com Sat Jan 2 20:21:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 17:21:34 -0800 (PST) Subject: [pypy-commit] pypy default: Mention here the _PyLong_FromByteArray() fix Message-ID: <5688779e.c4b61c0a.5d552.ffffce2d@mx.google.com> Author: Armin Rigo Branch: Changeset: r81529:a3691b49bded Date: 2016-01-03 02:20 +0100 http://bitbucket.org/pypy/pypy/changeset/a3691b49bded/ Log: Mention here the _PyLong_FromByteArray() fix diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,8 @@ .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 +Fixed ``_PyLong_FromByteArray()``, which was buggy. + .. branch: numpy-1.10 Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy From pypy.commits at gmail.com Sat Jan 2 20:22:46 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 17:22:46 -0800 (PST) Subject: [pypy-commit] pypy default: Minor performance improvement Message-ID: <568877e6.cf0b1c0a.f4895.6901@mx.google.com> Author: Armin Rigo Branch: Changeset: r81530:9dd3ef98eeae Date: 2016-01-03 02:22 +0100 http://bitbucket.org/pypy/pypy/changeset/9dd3ef98eeae/ Log: Minor performance improvement diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -238,10 +238,9 @@ c = intmask(bytes[n - i - 1]) else: c = intmask(bytes[i]) - digit = rbigint.fromint(c) result = result.lshift(8) - result = result.add(digit) + result = result.int_add(c) if signed and c >= 0x80: result = result.sub(ONERBIGINT.lshift(8 * n)) From pypy.commits at gmail.com Sat Jan 2 20:26:52 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 17:26:52 -0800 (PST) Subject: [pypy-commit] pypy default: More tests, more fixes Message-ID: <568878dc.c5321c0a.26dfa.ffffdba8@mx.google.com> Author: Armin Rigo Branch: Changeset: r81531:6a1567a45cc7 Date: 2016-01-03 02:26 +0100 http://bitbucket.org/pypy/pypy/changeset/6a1567a45cc7/ Log: More tests, more fixes diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -231,18 +231,20 @@ # xxx not the most efficient implementation possible, but should work result = NULLRBIGINT - c = 0 + most_significant = 0 for i in range(0, n): if little_endian: c = intmask(bytes[n - i - 1]) else: c = intmask(bytes[i]) + if i == 0: + most_significant = c result = result.lshift(8) result = result.int_add(c) - if signed and c >= 0x80: + if signed and most_significant >= 0x80: result = result.sub(ONERBIGINT.lshift(8 * n)) return space.newlong_from_rbigint(result) diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -180,6 +180,22 @@ assert module.from_bytearray(False, False) == 0x9ABC assert module.from_bytearray(False, True) == -0x6544 + def test_frombytearray_2(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x41BC9A + assert module.from_bytearray(True, True) == 0x41BC9A + assert module.from_bytearray(False, False) == 0x9ABC41 + assert module.from_bytearray(False, True) == -0x6543BF + def test_fromunicode(self): module = self.import_extension('foo', [ ("from_unicode", "METH_O", From pypy.commits at gmail.com Sat Jan 2 20:35:59 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 02 Jan 2016 17:35:59 -0800 (PST) Subject: [pypy-commit] pypy default: Meh! The rbigint class has already got a frombytes() method, which Message-ID: <56887aff.8e371c0a.e9e2b.ffffd8be@mx.google.com> Author: Armin Rigo Branch: Changeset: r81532:c8029737eae2 Date: 2016-01-03 02:35 +0100 http://bitbucket.org/pypy/pypy/changeset/c8029737eae2/ Log: Meh! The rbigint class has already got a frombytes() method, which is a better (and bug-free) version. diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -5,7 +5,7 @@ from pypy.objspace.std.longobject import W_LongObject from pypy.interpreter.error import OperationError from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask -from rpython.rlib.rbigint import rbigint, NULLRBIGINT, ONERBIGINT +from rpython.rlib.rbigint import rbigint from rpython.rlib.rarithmetic import intmask @@ -228,23 +228,11 @@ def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - - # xxx not the most efficient implementation possible, but should work - result = NULLRBIGINT - most_significant = 0 - - for i in range(0, n): - if little_endian: - c = intmask(bytes[n - i - 1]) - else: - c = intmask(bytes[i]) - if i == 0: - most_significant = c - - result = result.lshift(8) - result = result.int_add(c) - - if signed and most_significant >= 0x80: - result = result.sub(ONERBIGINT.lshift(8 * n)) - + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, bytes), + rffi.cast(lltype.Signed, n)) + if little_endian: + byteorder = 'little' + else: + byteorder = 'big' + result = rbigint.frombytes(s, byteorder, signed != 0) return space.newlong_from_rbigint(result) From pypy.commits at gmail.com Sun Jan 3 03:28:22 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 00:28:22 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5688dba6.2851c20a.ab8b3.24b3@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r688:273db1fd47d1 Date: 2016-01-03 09:28 +0100 http://bitbucket.org/pypy/pypy.org/changeset/273db1fd47d1/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30297 of $80000 (37.9%) + $30322 of $80000 (37.9%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Sun Jan 3 06:28:07 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 03:28:07 -0800 (PST) Subject: [pypy-commit] cffi windows-tls: trying out with a DllMain function Message-ID: <568905c7.cdb81c0a.9debb.43aa@mx.google.com> Author: Armin Rigo Branch: windows-tls Changeset: r2521:d7e149c4dc3d Date: 2016-01-03 12:27 +0100 http://bitbucket.org/cffi/cffi/changeset/d7e149c4dc3d/ Log: trying out with a DllMain function diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -4,12 +4,53 @@ /* errno and GetLastError support */ struct cffi_errno_s { + /* The locally-made thread state. This is only non-null in case + we build the thread state here. It remains null if this thread + had already a thread state provided by CPython. */ + PyThreadState *local_thread_state; + + /* The saved errno and lasterror. */ int saved_errno; int saved_lasterror; }; static DWORD cffi_tls_index = TLS_OUT_OF_INDEXES; +BOOL WINAPI DllMain(HINSTANCE hinstDLL, + DWORD reason_for_call, + LPVOID reserved) +{ + LPVOID tls; + + switch (reason_for_call) { + + case DLL_THREAD_DETACH: + if (cffi_tls_index != TLS_OUT_OF_INDEXES) { + tls = TlsGetValue(cffi_tls_index); + if (tls != NULL) { + fprintf(stderr, "thread shutting down! %p\n", + tls->local_thread_state); + TlsSetValue(cffi_tls_index, NULL); + + if (tls->local_thread_state != NULL) { + /* We need to re-acquire the GIL temporarily to free the + thread state. I hope it is not a problem to do it in + DLL_THREAD_DETACH. + */ + PyEval_RestoreThread(tls->local_thread_state); + PyThreadState_DeleteCurrent(); + } + free(tls); + } + } + break; + + default: + break; + } + return TRUE; +} + static void init_cffi_tls(void) { if (cffi_tls_index == TLS_OUT_OF_INDEXES) { @@ -24,7 +65,6 @@ LPVOID p = TlsGetValue(cffi_tls_index); if (p == NULL) { - /* XXX this malloc() leaks */ p = malloc(sizeof(struct cffi_errno_s)); if (p == NULL) return NULL; From pypy.commits at gmail.com Sun Jan 3 06:28:04 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 03:28:04 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: possibly clarify example by using the syntax that allows naturally Message-ID: <568905c4.4f911c0a.3af9c.ffff81f5@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2519:44da071e1d45 Date: 2016-01-03 12:17 +0100 http://bitbucket.org/cffi/cffi/changeset/44da071e1d45/ Log: possibly clarify example by using the syntax that allows naturally to write a bunch of functions diff --git a/demo/embedding.py b/demo/embedding.py --- a/demo/embedding.py +++ b/demo/embedding.py @@ -3,7 +3,9 @@ ffi = cffi.FFI() ffi.cdef(""" - extern "Python" int add(int, int); + extern "Python" { + int add(int, int); + } """, dllexport=True) ffi.embedding_init_code(""" From pypy.commits at gmail.com Sun Jan 3 06:28:06 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 03:28:06 -0800 (PST) Subject: [pypy-commit] cffi windows-tls: a branch to improve the TLS handling on Windows (and fix a small leak) Message-ID: <568905c6.84ab1c0a.1976a.37d1@mx.google.com> Author: Armin Rigo Branch: windows-tls Changeset: r2520:31289132b94b Date: 2016-01-03 12:19 +0100 http://bitbucket.org/cffi/cffi/changeset/31289132b94b/ Log: a branch to improve the TLS handling on Windows (and fix a small leak) From pypy.commits at gmail.com Sun Jan 3 07:25:21 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 04:25:21 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: windows fix Message-ID: <56891331.482e1c0a.ef337.474c@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2522:4fa3895afca1 Date: 2016-01-03 13:23 +0100 http://bitbucket.org/cffi/cffi/changeset/4fa3895afca1/ Log: windows fix diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -43,6 +43,7 @@ # endif #else /* --- Windows threads version --- */ +# include # define cffi_compare_and_swap(l,o,n) InterlockedCompareExchangePointer(l,n,o) # define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) # define cffi_read_barrier() (void)0 From pypy.commits at gmail.com Sun Jan 3 07:27:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 04:27:50 -0800 (PST) Subject: [pypy-commit] cffi windows-tls: fix Message-ID: <568913c6.8f7e1c0a.31ff5.160b@mx.google.com> Author: Armin Rigo Branch: windows-tls Changeset: r2523:19533a7c52c5 Date: 2016-01-03 13:27 +0100 http://bitbucket.org/cffi/cffi/changeset/19533a7c52c5/ Log: fix diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -20,14 +20,16 @@ DWORD reason_for_call, LPVOID reserved) { - LPVOID tls; + LPVOID p; + struct cffi_errno_s *tls; switch (reason_for_call) { case DLL_THREAD_DETACH: if (cffi_tls_index != TLS_OUT_OF_INDEXES) { - tls = TlsGetValue(cffi_tls_index); - if (tls != NULL) { + p = TlsGetValue(cffi_tls_index); + if (p != NULL) { + tls = (struct cffi_errno_s *)p; fprintf(stderr, "thread shutting down! %p\n", tls->local_thread_state); TlsSetValue(cffi_tls_index, NULL); From pypy.commits at gmail.com Sun Jan 3 07:59:26 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 04:59:26 -0800 (PST) Subject: [pypy-commit] cffi windows-tls: Generalize the code for 'local_thread_state' and move it to Message-ID: <56891b2e.42661c0a.ff609.529c@mx.google.com> Author: Armin Rigo Branch: windows-tls Changeset: r2524:345a268931c3 Date: 2016-01-03 13:59 +0100 http://bitbucket.org/cffi/cffi/changeset/345a268931c3/ Log: Generalize the code for 'local_thread_state' and move it to misc_thread_common.h. diff --git a/c/misc_thread_common.h b/c/misc_thread_common.h new file mode 100644 --- /dev/null +++ b/c/misc_thread_common.h @@ -0,0 +1,134 @@ +#ifndef WITH_THREAD +# error "xxx no-thread configuration not tested, please report if you need that" +#endif + + +struct cffi_tls_s { + /* The locally-made thread state. This is only non-null in case + we build the thread state here. It remains null if this thread + had already a thread state provided by CPython. */ + PyThreadState *local_thread_state; + + /* The saved errno. If the C compiler supports '__thread', then + we use that instead; this value is not used at all in this case. */ + int saved_errno; + +#ifdef MS_WIN32 + /* The saved lasterror, on Windows. */ + int saved_lasterror; +#endif +}; + +static struct cffi_tls_s *get_cffi_tls(void); /* in misc_thread_posix.h + or misc_win32.h */ + +static void cffi_thread_shutdown(void *p) +{ + struct cffi_tls_s *tls = (struct cffi_tls_s *)p; + + if (tls->local_thread_state != NULL) { + /* We need to re-acquire the GIL temporarily to free the + thread state. I hope it is not a problem to do it in + a thread-local destructor. + */ + PyEval_RestoreThread(tls->local_thread_state); + PyThreadState_DeleteCurrent(); + } + free(tls); +} + +/* USE__THREAD is defined by setup.py if it finds that it is + syntactically valid to use "__thread" with this C compiler. */ +#ifdef USE__THREAD + +static __thread int cffi_saved_errno = 0; +static void save_errno_only(void) { cffi_saved_errno = errno; } +static void restore_errno_only(void) { errno = cffi_saved_errno; } + +#else + +static void save_errno_only(void) +{ + int saved = errno; + struct cffi_tls_s *tls = get_cffi_tls(); + if (tls != NULL) + tls->saved_errno = saved; +} + +static void restore_errno_only(void) +{ + struct cffi_tls_s *tls = get_cffi_tls(); + if (tls != NULL) + errno = tls->saved_errno; +} + +#endif + + +/* Seems that CPython 3.5.1 made our job harder. Did not find out how + to do that without these hacks. We can't use PyThreadState_GET(), + because that calls PyThreadState_Get() which fails an assert if the + result is NULL. */ +#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) + /* this was abruptly un-defined in 3.5.1 */ +void *volatile _PyThreadState_Current; + /* XXX simple volatile access is assumed atomic */ +# define _Py_atomic_load_relaxed(pp) (*(pp)) +#endif + +static PyThreadState *get_current_ts(void) +{ +#if PY_MAJOR_VERSION >= 3 + return (PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current); +#else + return _PyThreadState_Current; +#endif +} + +static PyGILState_STATE gil_ensure(void) +{ + /* Called at the start of a callback. Replacement for + PyGILState_Ensure(). + */ + PyGILState_STATE result; + struct cffi_tls_s *tls; + PyThreadState *ts = PyGILState_GetThisThreadState(); + + if (ts != NULL) { + ts->gilstate_counter++; + if (ts != get_current_ts()) { + /* common case: 'ts' is our non-current thread state and + we have to make it current and acquire the GIL */ + PyEval_RestoreThread(ts); + return PyGILState_UNLOCKED; + } + else { + return PyGILState_LOCKED; + } + } + else { + /* no thread state here so far. */ + result = PyGILState_Ensure(); + assert(result == PyGILState_UNLOCKED); + + ts = PyGILState_GetThisThreadState(); + assert(ts != NULL); + assert(ts == get_current_ts()); + assert(ts->gilstate_counter >= 1); + + /* Save the now-current thread state inside our 'local_thread_state' + field, to be removed at thread shutdown */ + tls = get_cffi_tls(); + if (tls != NULL) { + tls->local_thread_state = ts; + ts->gilstate_counter++; + } + + return result; + } +} + +static void gil_release(PyGILState_STATE oldstate) +{ + PyGILState_Release(oldstate); +} diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h --- a/c/misc_thread_posix.h +++ b/c/misc_thread_posix.h @@ -13,41 +13,15 @@ shut down, using a destructor on the tls key. */ -#ifdef WITH_THREAD #include +#include "misc_thread_common.h" static pthread_key_t cffi_tls_key; -struct cffi_tls_s { - /* The locally-made thread state. This is only non-null in case - we build the thread state here. It remains null if this thread - had already a thread state provided by CPython. */ - PyThreadState *local_thread_state; - - /* The saved errno. If the C compiler supports '__thread', then - we use that instead; this value is not used at all in this case. */ - int saved_errno; -}; - -static void _tls_destructor(void *p) -{ - struct cffi_tls_s *tls = (struct cffi_tls_s *)p; - - if (tls->local_thread_state != NULL) { - /* We need to re-acquire the GIL temporarily to free the - thread state. I hope it is not a problem to do it in - a thread-local destructor. - */ - PyEval_RestoreThread(tls->local_thread_state); - PyThreadState_DeleteCurrent(); - } - free(tls); -} - static void init_cffi_tls(void) { - if (pthread_key_create(&cffi_tls_key, _tls_destructor) != 0) + if (pthread_key_create(&cffi_tls_key, &cffi_thread_shutdown) != 0) PyErr_SetString(PyExc_OSError, "pthread_key_create() failed"); } @@ -71,116 +45,5 @@ return (struct cffi_tls_s *)p; } - -/* USE__THREAD is defined by setup.py if it finds that it is - syntactically valid to use "__thread" with this C compiler. */ -#ifdef USE__THREAD - -static __thread int cffi_saved_errno = 0; -static void save_errno(void) { cffi_saved_errno = errno; } -static void restore_errno(void) { errno = cffi_saved_errno; } - -#else - -static void save_errno(void) -{ - int saved = errno; - struct cffi_tls_s *tls = get_cffi_tls(); - if (tls != NULL) - tls->saved_errno = saved; -} - -static void restore_errno(void) -{ - struct cffi_tls_s *tls = get_cffi_tls(); - if (tls != NULL) - errno = tls->saved_errno; -} - -#endif - - -/* Seems that CPython 3.5.1 made our job harder. Did not find out how - to do that without these hacks. We can't use PyThreadState_GET(), - because that calls PyThreadState_Get() which fails an assert if the - result is NULL. */ -#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) - /* this was abruptly un-defined in 3.5.1 */ -void *volatile _PyThreadState_Current; - /* XXX simple volatile access is assumed atomic */ -# define _Py_atomic_load_relaxed(pp) (*(pp)) -#endif - - -static PyThreadState *get_current_ts(void) -{ -#if PY_MAJOR_VERSION >= 3 - return (PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current); -#else - return _PyThreadState_Current; -#endif -} - -static PyGILState_STATE gil_ensure(void) -{ - /* Called at the start of a callback. Replacement for - PyGILState_Ensure(). - */ - PyGILState_STATE result; - struct cffi_tls_s *tls; - PyThreadState *ts = PyGILState_GetThisThreadState(); - - if (ts != NULL) { - ts->gilstate_counter++; - if (ts != get_current_ts()) { - /* common case: 'ts' is our non-current thread state and - we have to make it current and acquire the GIL */ - PyEval_RestoreThread(ts); - return PyGILState_UNLOCKED; - } - else { - return PyGILState_LOCKED; - } - } - else { - /* no thread state here so far. */ - result = PyGILState_Ensure(); - assert(result == PyGILState_UNLOCKED); - - ts = PyGILState_GetThisThreadState(); - assert(ts != NULL); - assert(ts == get_current_ts()); - assert(ts->gilstate_counter >= 1); - - /* Save the now-current thread state inside our 'local_thread_state' - field, to be removed at thread shutdown */ - tls = get_cffi_tls(); - if (tls != NULL) { - tls->local_thread_state = ts; - ts->gilstate_counter++; - } - - return result; - } -} - -static void gil_release(PyGILState_STATE oldstate) -{ - PyGILState_Release(oldstate); -} - - -#else /* !WITH_THREAD */ - -static int cffi_saved_errno = 0; -static void save_errno(void) { cffi_saved_errno = errno; } -static void restore_errno(void) { errno = cffi_saved_errno; } - -static PyGILState_STATE gil_ensure(void) { return -1; } -static void gil_release(PyGILState_STATE oldstate) { } - -#endif /* !WITH_THREAD */ - - -#define save_errno_only save_errno -#define restore_errno_only restore_errno +#define save_errno save_errno_only +#define restore_errno restore_errno_only diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -1,18 +1,10 @@ #include /* for alloca() */ + /************************************************************/ /* errno and GetLastError support */ -struct cffi_errno_s { - /* The locally-made thread state. This is only non-null in case - we build the thread state here. It remains null if this thread - had already a thread state provided by CPython. */ - PyThreadState *local_thread_state; - - /* The saved errno and lasterror. */ - int saved_errno; - int saved_lasterror; -}; +#include "misc_thread_common.h" static DWORD cffi_tls_index = TLS_OUT_OF_INDEXES; @@ -21,7 +13,6 @@ LPVOID reserved) { LPVOID p; - struct cffi_errno_s *tls; switch (reason_for_call) { @@ -29,20 +20,8 @@ if (cffi_tls_index != TLS_OUT_OF_INDEXES) { p = TlsGetValue(cffi_tls_index); if (p != NULL) { - tls = (struct cffi_errno_s *)p; - fprintf(stderr, "thread shutting down! %p\n", - tls->local_thread_state); TlsSetValue(cffi_tls_index, NULL); - - if (tls->local_thread_state != NULL) { - /* We need to re-acquire the GIL temporarily to free the - thread state. I hope it is not a problem to do it in - DLL_THREAD_DETACH. - */ - PyEval_RestoreThread(tls->local_thread_state); - PyThreadState_DeleteCurrent(); - } - free(tls); + cffi_thread_shutdown(p); } } break; @@ -62,7 +41,7 @@ } } -static struct cffi_errno_s *_geterrno_object(void) +static struct cffi_tls_s *get_cffi_tls(void) { LPVOID p = TlsGetValue(cffi_tls_index); @@ -76,13 +55,15 @@ return (struct cffi_errno_s *)p; } +#ifdef USE__THREAD +# error "unexpected USE__THREAD on Windows" +#endif + static void save_errno(void) { int current_err = errno; int current_lasterr = GetLastError(); - struct cffi_errno_s *p; - - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p != NULL) { p->saved_errno = current_err; p->saved_lasterror = current_lasterr; @@ -90,23 +71,9 @@ /* else: cannot report the error */ } -static void save_errno_only(void) -{ - int current_err = errno; - struct cffi_errno_s *p; - - p = _geterrno_object(); - if (p != NULL) { - p->saved_errno = current_err; - } - /* else: cannot report the error */ -} - static void restore_errno(void) { - struct cffi_errno_s *p; - - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p != NULL) { SetLastError(p->saved_lasterror); errno = p->saved_errno; @@ -114,16 +81,8 @@ /* else: cannot report the error */ } -static void restore_errno_only(void) -{ - struct cffi_errno_s *p; +/************************************************************/ - p = _geterrno_object(); - if (p != NULL) { - errno = p->saved_errno; - } - /* else: cannot report the error */ -} #if PY_MAJOR_VERSION >= 3 static PyObject *b_getwinerror(PyObject *self, PyObject *args, PyObject *kwds) @@ -225,16 +184,6 @@ #endif -#ifdef WITH_THREAD -/* XXX should port the code from misc_thread_posix.h */ -static PyGILState_STATE gil_ensure(void) { return PyGILState_Ensure(); } -static void gil_release(PyGILState_STATE oldst) { PyGILState_Release(oldst); } -#else -static PyGILState_STATE gil_ensure(void) { return -1; } -static void gil_release(PyGILState_STATE oldstate) { } -#endif - - /************************************************************/ /* Emulate dlopen()&co. from the Windows API */ From pypy.commits at gmail.com Sun Jan 3 08:02:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 05:02:34 -0800 (PST) Subject: [pypy-commit] cffi windows-tls: fixes Message-ID: <56891bea.8a75c20a.13c37.4630@mx.google.com> Author: Armin Rigo Branch: windows-tls Changeset: r2525:e7900ea4b1fd Date: 2016-01-03 14:02 +0100 http://bitbucket.org/cffi/cffi/changeset/e7900ea4b1fd/ Log: fixes diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -46,13 +46,13 @@ LPVOID p = TlsGetValue(cffi_tls_index); if (p == NULL) { - p = malloc(sizeof(struct cffi_errno_s)); + p = malloc(sizeof(struct cffi_tls_s)); if (p == NULL) return NULL; - memset(p, 0, sizeof(struct cffi_errno_s)); + memset(p, 0, sizeof(struct cffi_tls_s)); TlsSetValue(cffi_tls_index, p); } - return (struct cffi_errno_s *)p; + return (struct cffi_tls_s *)p; } #ifdef USE__THREAD @@ -97,8 +97,7 @@ return NULL; if (err == -1) { - struct cffi_errno_s *p; - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p == NULL) return PyErr_NoMemory(); err = p->saved_lasterror; @@ -139,7 +138,7 @@ int len; char *s; char *s_buf = NULL; /* Free via LocalFree */ - char s_small_buf[28]; /* Room for "Windows Error 0xFFFFFFFF" */ + char s_small_buf[40]; /* Room for "Windows Error 0xFFFFFFFFFFFFFFFF" */ PyObject *v; static char *keywords[] = {"code", NULL}; @@ -147,8 +146,7 @@ return NULL; if (err == -1) { - struct cffi_errno_s *p; - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p == NULL) return PyErr_NoMemory(); err = p->saved_lasterror; From pypy.commits at gmail.com Sun Jan 3 08:55:31 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 05:55:31 -0800 (PST) Subject: [pypy-commit] cffi windows-tls: Remove this field if it's not used Message-ID: <56892853.85b01c0a.bebb.7bbf@mx.google.com> Author: Armin Rigo Branch: windows-tls Changeset: r2526:243617bdfa70 Date: 2016-01-03 14:55 +0100 http://bitbucket.org/cffi/cffi/changeset/243617bdfa70/ Log: Remove this field if it's not used diff --git a/c/misc_thread_common.h b/c/misc_thread_common.h --- a/c/misc_thread_common.h +++ b/c/misc_thread_common.h @@ -9,9 +9,11 @@ had already a thread state provided by CPython. */ PyThreadState *local_thread_state; +#ifndef USE__THREAD /* The saved errno. If the C compiler supports '__thread', then - we use that instead; this value is not used at all in this case. */ + we use that instead. */ int saved_errno; +#endif #ifdef MS_WIN32 /* The saved lasterror, on Windows. */ From pypy.commits at gmail.com Sun Jan 3 12:32:13 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 09:32:13 -0800 (PST) Subject: [pypy-commit] pypy default: Issue #2222: test and fix Message-ID: <56895b1d.6351c20a.2321.ffffba2a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81533:a7b303390e51 Date: 2016-01-03 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/a7b303390e51/ Log: Issue #2222: test and fix diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -97,6 +97,17 @@ def __init__(self, buffer, offset, size): self.readonly = buffer.readonly + if isinstance(buffer, SubBuffer): # don't nest them + # we want a view (offset, size) over a view + # (buffer.offset, buffer.size) over buffer.buffer + at_most = buffer.size - offset + if size > at_most: + if at_most < 0: + at_most = 0 + size = at_most + offset += buffer.offset + buffer = buffer.buffer + # self.buffer = buffer self.offset = offset self.size = size diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -45,3 +45,9 @@ ssbuf = SubBuffer(sbuf, 3, 2) assert ssbuf.getslice(0, 2, 1, 2) == 'ld' assert ssbuf.as_str_and_offset_maybe() == ('hello world', 9) + +def test_repeated_subbuffer(): + buf = StringBuffer('x' * 10000) + for i in range(9999, 9, -1): + buf = SubBuffer(buf, 1, i) + assert buf.getlength() == 10 From pypy.commits at gmail.com Sun Jan 3 14:10:27 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 03 Jan 2016 11:10:27 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56897223.6a9dc20a.4b1ff.3cc7@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r689:1c93de795934 Date: 2016-01-03 20:10 +0100 http://bitbucket.org/pypy/pypy.org/changeset/1c93de795934/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $62631 of $105000 (59.6%) + $62641 of $105000 (59.7%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Mon Jan 4 03:07:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 04 Jan 2016 00:07:09 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: load_imm might emit different code load 32 or 64 bit imm, added this case to the regex of test_compile_asmlen Message-ID: <568a282d.e686c20a.322f2.7edc@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81534:b2147c0cc9f7 Date: 2016-01-04 08:47 +0100 http://bitbucket.org/pypy/pypy/changeset/b2147c0cc9f7/ Log: load_imm might emit different code load 32 or 64 bit imm, added this case to the regex of test_compile_asmlen diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -25,5 +25,6 @@ return cpu add_loop_instructions = "lg; lgr; larl; agr; cgfi; je; j;$" + # realloc frame takes the most space (from just after larl, to lay) bridge_loop_instructions = "larl; lg; cgfi; je; lghi; stg; " \ - "lay; lgfi; lgfi; basr; lay; lg; br;$" + "lay; lgfi;( iihf;)? lgfi;( iihf;)? basr; lay; lg; br;$" From pypy.commits at gmail.com Mon Jan 4 03:07:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 04 Jan 2016 00:07:11 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added test_basic & test_calling_conventions, the latter already passes Message-ID: <568a282f.42b81c0a.90f79.ffff8611@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81535:03d679053346 Date: 2016-01-04 09:06 +0100 http://bitbucket.org/pypy/pypy/changeset/03d679053346/ Log: added test_basic & test_calling_conventions, the latter already passes diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -211,6 +211,15 @@ def pop_std_frame(self): self.LAY(r.SP, l.addr(STD_FRAME_SIZE_IN_BYTES, r.SP)) + def get_assembler_function(self): + "NOT_RPYTHON: tests only" + from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager + class FakeCPU: + HAS_CODEMAP = False + asmmemmgr = AsmMemoryManager() + addr = self.materialize(FakeCPU(), []) + return rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), addr) + class OverwritingBuilder(BlockBuilderMixin, AbstractZARCHBuilder): def __init__(self, mc, start, num_insts=0): AbstractZARCHBuilder.__init__(self) diff --git a/rpython/jit/backend/zarch/test/support.py b/rpython/jit/backend/zarch/test/support.py --- a/rpython/jit/backend/zarch/test/support.py +++ b/rpython/jit/backend/zarch/test/support.py @@ -1,3 +1,5 @@ +from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.jit.metainterp.test import support from rpython.rtyper.lltypesystem import lltype, rffi def run_asm(asm, return_float=False): @@ -9,3 +11,13 @@ if return_float: pass return func() + +class JitZARCHMixin(support.LLJitMixin): + type_system = 'lltype' + CPUClass = getcpuclass() + # we have to disable unroll + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" + basic = False + + def check_jumps(self, maxcount): + pass From pypy.commits at gmail.com Mon Jan 4 03:16:16 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 04 Jan 2016 00:16:16 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: missing files Message-ID: <568a2a50.6351c20a.2321.ffff9d4c@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81536:cbc9bc98efec Date: 2016-01-04 09:15 +0100 http://bitbucket.org/pypy/pypy/changeset/cbc9bc98efec/ Log: missing files diff --git a/rpython/jit/backend/zarch/test/test_basic.py b/rpython/jit/backend/zarch/test/test_basic.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_basic.py @@ -0,0 +1,40 @@ +import py +from rpython.jit.codewriter.policy import StopAtXPolicy +from rpython.rlib.jit import JitDriver +from rpython.jit.metainterp.test import test_ajit +from rpython.jit.backend.zarch.test.support import JitZARCHMixin +from rpython.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() + +class TestBasic(JitZARCHMixin, test_ajit.BaseLLtypeTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_ajit.py + def test_bug(self): + jitdriver = JitDriver(greens = [], reds = ['n']) + class X(object): + pass + def f(n): + while n > -100: + jitdriver.can_enter_jit(n=n) + jitdriver.jit_merge_point(n=n) + x = X() + x.arg = 5 + if n <= 0: break + n -= x.arg + x.arg = 6 # prevents 'x.arg' from being annotated as constant + return n + res = self.meta_interp(f, [31], enable_opts='') + assert res == -4 + + def test_r_dict(self): + # a Struct that belongs to the hash table is not seen as being + # included in the larger Array + py.test.skip("issue with ll2ctypes") + + def test_free_object(self): + py.test.skip("issue of freeing, probably with ll2ctypes") + + if not CPU.supports_longlong: + def test_read_timestamp(self): + py.test.skip('requires longlong') diff --git a/rpython/jit/backend/zarch/test/test_calling_convention.py b/rpython/jit/backend/zarch/test/test_calling_convention.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_calling_convention.py @@ -0,0 +1,18 @@ +from rpython.jit.backend.test.calling_convention_test import CallingConvTests +from rpython.jit.backend.zarch.codebuilder import InstrBuilder +from rpython.rtyper.lltypesystem import lltype, rffi +import rpython.jit.backend.zarch.registers as r +import rpython.jit.backend.zarch.conditions as c + + +class TestPPCCallingConvention(CallingConvTests): + # ../../test/calling_convention_test.py + + def make_function_returning_stack_pointer(self): + mc = InstrBuilder() + mc.LGR(r.r2, r.SP) + mc.BCR(c.ANY, r.r14) + return rffi.cast(lltype.Signed, mc.get_assembler_function()) + + def get_alignment_requirements(self): + return 2 # two byte alignment From pypy.commits at gmail.com Mon Jan 4 04:33:08 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 01:33:08 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Trying to keep alive the ExecutionContext built in new threads for Message-ID: <568a3c54.9a6f1c0a.5f3c6.ffffadad@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81537:9a3da7ddb21f Date: 2016-01-04 09:40 +0100 http://bitbucket.org/pypy/pypy/changeset/9a3da7ddb21f/ Log: Trying to keep alive the ExecutionContext built in new threads for calls to cffi/ctypes functions (currently a case where PyPy is 5 times slower than the newest CFFI on CPython) From pypy.commits at gmail.com Mon Jan 4 04:33:09 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 01:33:09 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Add objectmodel.fetch_translated_config() Message-ID: <568a3c55.85e41c0a.a3d56.0a88@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81538:8aa4fa415f00 Date: 2016-01-04 09:41 +0100 http://bitbucket.org/pypy/pypy/changeset/8aa4fa415f00/ Log: Add objectmodel.fetch_translated_config() diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -335,6 +335,25 @@ # XXX this can be made more efficient in the future return bytearray(str(i)) +def fetch_translated_config(): + """Returns the config that is current when translating. + Returns None if not translated. + """ + return None + +class Entry(ExtRegistryEntry): + _about_ = fetch_translated_config + + def compute_result_annotation(self): + config = self.bookkeeper.annotator.translator.config + return self.bookkeeper.immutablevalue(config) + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + translator = hop.rtyper.annotator.translator + hop.exception_cannot_occur() + return hop.inputconst(lltype.Void, translator.config) + # ____________________________________________________________ class FREED_OBJECT(object): diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -6,7 +6,8 @@ prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, - setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin) + setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin, + fetch_translated_config) from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.test.test_llinterp import interpret @@ -439,6 +440,13 @@ res = self.interpret(f, [42]) assert res == 84 + def test_fetch_translated_config(self): + assert fetch_translated_config() is None + def f(): + return fetch_translated_config().translation.continuation + res = self.interpret(f, []) + assert res is False + def test_specialize_decorator(): def f(): From pypy.commits at gmail.com Mon Jan 4 04:33:11 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 01:33:11 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Add custom trace hooks in order to walk all threadlocalrefs, Message-ID: <568a3c57.87c21c0a.faf38.ffffa0b2@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81539:0c470d6715d7 Date: 2016-01-04 10:32 +0100 http://bitbucket.org/pypy/pypy/changeset/0c470d6715d7/ Log: Add custom trace hooks in order to walk all threadlocalrefs, which are now chained in a doubly-linked list. Of course it only works with our own GCs, not Boehm. diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -94,7 +94,7 @@ old_sig = ec._signals_enabled if ident != self._mainthreadident: old_sig += 1 - self._cleanup_() + self._cleanup_() # clears self._valuedict self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -291,8 +291,6 @@ # ____________________________________________________________ # # Thread-locals. -# KEEP THE REFERENCE ALIVE, THE GC DOES NOT FOLLOW THEM SO FAR! -# We use _make_sure_does_not_move() to make sure the pointer will not move. class ThreadLocalField(object): @@ -351,6 +349,9 @@ class ThreadLocalReference(ThreadLocalField): + # A thread-local that points to an object. The object stored in such + # a thread-local is kept alive as long as the thread is not finished + # (but only with our own GCs! it seems not to work with Boehm...) _COUNT = 1 def __init__(self, Cls, loop_invariant=False): @@ -378,19 +379,39 @@ assert isinstance(value, Cls) or value is None if we_are_translated(): from rpython.rtyper.annlowlevel import cast_instance_to_gcref - from rpython.rlib.rgc import _make_sure_does_not_move - from rpython.rlib.objectmodel import running_on_llinterp gcref = cast_instance_to_gcref(value) - if not running_on_llinterp: - if gcref: - _make_sure_does_not_move(gcref) value = lltype.cast_ptr_to_int(gcref) setraw(value) + rgc.register_custom_trace_hook(TRACETLREF, _lambda_trace_tlref) + rgc.ll_writebarrier(_tracetlref_obj) else: self.local.value = value self.get = get self.set = set + self.automatic_keepalive = _automatic_keepalive + + def _trace_tlref(gc, obj, callback, arg): + p = llmemory.NULL + while True: + p = llop.threadlocalref_enum(llmemory.Address, p) + if not p: + break + gc._trace_callback(callback, arg, p + offset) + _lambda_trace_tlref = lambda: _trace_tlref + TRACETLREF = lltype.GcStruct('TRACETLREF') + _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) + + +def _automatic_keepalive(): + """Returns True if translated with a GC that keeps alive + the set() value until the end of the thread. Returns False + if you need to keep it alive yourself. + """ + from rpython.rlib import objectmodel + config = objectmodel.fetch_translated_config() + return (config is not None and + config.translation.gctransformer == "framework") tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -240,3 +240,35 @@ class TestUsingFramework(AbstractThreadTests): gcpolicy = 'minimark' + + def test_tlref_keepalive(self): + import weakref + from rpython.config.translationoption import SUPPORT__THREAD + + class FooBar(object): + pass + t = ThreadLocalReference(FooBar) + assert t.automatic_keepalive() is False + + def tset(): + x1 = FooBar() + t.set(x1) + return weakref.ref(x1) + tset._dont_inline_ = True + + def f(): + assert t.automatic_keepalive() is True + wr = tset() + import gc; gc.collect() # 'x1' should not be collected + x2 = t.get() + assert x2 is not None + assert wr() is not None + assert wr() is x2 + return 42 + + for no__thread in (True, False): + if SUPPORT__THREAD or no__thread: + extra_options = {'no__thread': no__thread} + fn = self.getcompiled(f, [], extra_options=extra_options) + res = fn() + assert res == 42 diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -950,6 +950,9 @@ return self.op_raw_load(RESTYPE, _address_of_thread_local(), offset) op_threadlocalref_get.need_result_type = True + def op_threadlocalref_enum(self, prev): + raise NotImplementedError + # __________________________________________________________ # operations on addresses diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -545,8 +545,9 @@ 'getslice': LLOp(canraise=(Exception,)), 'check_and_clear_exc': LLOp(), - 'threadlocalref_addr': LLOp(sideeffects=False), # get (or make) addr of tl + 'threadlocalref_addr': LLOp(), # get (or make) addr of tl 'threadlocalref_get': LLOp(sideeffects=False), # read field (no check) + 'threadlocalref_enum': LLOp(sideeffects=False), # enum all threadlocalrefs # __________ debugging __________ 'debug_view': LLOp(), diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -733,6 +733,7 @@ print >> f, 'struct pypy_threadlocal_s {' print >> f, '\tint ready;' print >> f, '\tchar *stack_end;' + print >> f, '\tstruct pypy_threadlocal_s *prev, *next;' for field in fields: typename = database.gettype(field.FIELDTYPE) print >> f, '\t%s;' % cdecl(typename, field.fieldname) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -9,14 +9,31 @@ #include "src/threadlocal.h" +static struct pypy_threadlocal_s linkedlist_head = { + .prev = &linkedlist_head, + .next = &linkedlist_head }; + +struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev) +{ + if (prev == NULL) + prev = &linkedlist_head; + if (prev->next == &linkedlist_head) + return NULL; + return prev->next; +} + static void _RPy_ThreadLocals_Init(void *p) { + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + struct pypy_threadlocal_s *oldnext; memset(p, 0, sizeof(struct pypy_threadlocal_s)); + #ifdef RPY_TLOFS_p_errno - ((struct pypy_threadlocal_s *)p)->p_errno = &errno; + tls->p_errno = &errno; #endif #ifdef RPY_TLOFS_thread_ident - ((struct pypy_threadlocal_s *)p)->thread_ident = + tls->thread_ident = # ifdef _WIN32 GetCurrentThreadId(); # else @@ -26,7 +43,21 @@ where it is not the case are rather old nowadays. */ # endif #endif - ((struct pypy_threadlocal_s *)p)->ready = 42; + oldnext = linkedlist_head.next; + tls->prev = &linkedlist_head; + tls->next = oldnext; + linkedlist_head.next = tls; + oldnext->prev = tls; + tls->ready = 42; +} + +static void threadloc_unlink(struct pypy_threadlocal_s *tls) +{ + assert(tls->ready == 42); + tls->next->prev = tls->prev; + tls->prev->next = tls->next; + memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + tls->ready = 0; } @@ -53,9 +84,8 @@ void RPython_ThreadLocals_ThreadDie(void) { - memset(&pypy_threadlocal, 0xDD, - sizeof(struct pypy_threadlocal_s)); /* debug */ - pypy_threadlocal.ready = 0; + if (pypy_threadlocal.ready == 42) + threadloc_unlink(&pypy_threadlocal); } @@ -105,7 +135,7 @@ void *p = _RPy_ThreadLocals_Get(); if (p != NULL) { _RPy_ThreadLocals_Set(NULL); - memset(p, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + threadloc_unlink((struct pypy_threadlocal_s *)p); free(p); } } diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -13,14 +13,18 @@ to die. */ RPY_EXTERN void RPython_ThreadLocals_ThreadDie(void); -/* There are two llops: 'threadlocalref_addr' and 'threadlocalref_make'. - They both return the address of the thread-local structure (of the - C type 'struct pypy_threadlocal_s'). The difference is that - OP_THREADLOCALREF_MAKE() checks if we have initialized this thread- - local structure in the current thread, and if not, calls the following - helper. */ +/* 'threadlocalref_addr' returns the address of the thread-local + structure (of the C type 'struct pypy_threadlocal_s'). It first + checks if we have initialized this thread-local structure in the + current thread, and if not, calls the following helper. */ RPY_EXTERN char *_RPython_ThreadLocals_Build(void); +RPY_EXTERN struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev); + +#define OP_THREADLOCALREF_ENUM(p, r) \ + r = _RPython_ThreadLocals_Enum(p) + /* ------------------------------------------------------------ */ #ifdef USE___THREAD diff --git a/rpython/translator/c/test/test_boehm.py b/rpython/translator/c/test/test_boehm.py --- a/rpython/translator/c/test/test_boehm.py +++ b/rpython/translator/c/test/test_boehm.py @@ -23,6 +23,7 @@ class AbstractGCTestClass(object): gcpolicy = "boehm" use_threads = False + extra_options = {} # deal with cleanups def setup_method(self, meth): @@ -33,8 +34,10 @@ #print "CLEANUP" self._cleanups.pop()() - def getcompiled(self, func, argstypelist=[], annotatorpolicy=None): - return compile(func, argstypelist, gcpolicy=self.gcpolicy, thread=self.use_threads) + def getcompiled(self, func, argstypelist=[], annotatorpolicy=None, + extra_options={}): + return compile(func, argstypelist, gcpolicy=self.gcpolicy, + thread=self.use_threads, **extra_options) class TestUsingBoehm(AbstractGCTestClass): From pypy.commits at gmail.com Mon Jan 4 04:45:06 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 01:45:06 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Leysin Winter Sprint 2016 Message-ID: <568a3f22.01941c0a.aecc6.ffffe2ff@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5579:797731eb2abc Date: 2016-01-04 10:44 +0100 http://bitbucket.org/pypy/extradoc/changeset/797731eb2abc/ Log: Leysin Winter Sprint 2016 diff --git a/sprintinfo/leysin-winter-2016/announcement.txt b/sprintinfo/leysin-winter-2016/announcement.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2016/announcement.txt @@ -0,0 +1,79 @@ +===================================================================== + PyPy Leysin Winter Sprint (20-27th February 2016) +===================================================================== + +The next PyPy sprint will be in Leysin, Switzerland, for the eleventh time. +This is a fully public sprint: newcomers and topics other than those +proposed below are welcome. + +------------------------------ +Goals and topics of the sprint +------------------------------ + +The details depend on who is here and ready to work. The list of +topics is mostly the same as last year (did PyPy became a mature +project with only long-term goals?): + +* cpyext (CPython C API emulation layer): various speed and + completeness topics + +* cleaning up the optimization step in the JIT, change the register + allocation done by the JIT's backend, or more improvements to the + warm-up time + +* finish vmprof - a statistical profiler for CPython and PyPy + +* Py3k (Python 3.x support), NumPyPy (the numpy module) + +* STM (Software Transaction Memory), notably: try to come up with + benchmarks, and measure them carefully in order to test and improve + the conflict reporting tools, and more generally to figure out how + practical it is in large projects to avoid conflicts + +* And as usual, the main side goal is to have fun in winter sports :-) + We can take a day off for ski. + +----------- +Exact times +----------- + +I have booked the week from Saturday 20 to Saturday 27. It is however +fine if you prefer to leave only on the Sunday 28, or even stay a few +more days on either side. The plan is to work full days between the 21 +and the 27. You are of course allowed to show up for a part of that +time only, too. + +----------------------- +Location & Accomodation +----------------------- + +Leysin, Switzerland, "same place as before". Let me refresh your +memory: both the sprint venue and the lodging will be in a +pair of chalets built specifically for bed & breakfast: +http://www.ermina.ch/. The place has a good ADSL Internet connection +with wireless installed. You can also arrange your own lodging +elsewhere (as long as you are in Leysin, you cannot be more than a 15 +minutes walk away from the sprint venue). + +Please *confirm* that you are coming so that we can adjust the +reservations as appropriate. + +The options of rooms are a bit more limited than on previous years +because the place for bed-and-breakfast is shrinking: what is +guaranteed is only one double-bed room and a bigger room with 5-6 +individual beds (the latter at 50-60 CHF per night, breakfast +included). If there are more people that would prefer a single room, +please contact me and we'll see what choices you have. There are a +choice of hotels, many of them reasonably priced for Switzerland. + +Please register by Mercurial:: + + https://bitbucket.org/pypy/extradoc/ + https://bitbucket.org/pypy/extradoc/raw/extradoc/sprintinfo/leysin-winter-2016 + +or on the pypy-dev mailing list if you do not yet have check-in rights: + + http://mail.python.org/mailman/listinfo/pypy-dev + +You need a Swiss-to-(insert country here) power adapter. There will be +some Swiss-to-EU adapters around, and at least one EU-format power strip. diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -0,0 +1,65 @@ + +People coming to the Leysin sprint Winter 2015 +================================================== + +People who have a ``?`` in their arrive/depart or accomodation +column are known to be coming but there are no details +available yet from them. + + +==================== ============== ======================= + Name Arrive/Depart Accomodation +==================== ============== ======================= +Armin Rigo private +==================== ============== ======================= + + +People on the following list were present at previous sprints: + +==================== ============== ===================== + Name Arrive/Depart Accomodation +==================== ============== ===================== +Maciej Fijalkowski 21-28 Ermina +Remi Meier 21-28 Ermina +Sebastian Pawlus 21-27 Ermina +Manuel Jacob 21-28 Ermina +Joan Massich 20-? Ermina +Quim Sanchez 20-? Ermina +Alexander Schremmer 21-23 Ermina +Romain Guillebert ? ? +Christian Clauss ? ? +Johan Råde ? ? +Antonio Cuni ? ? +Michael Foord ? ? +David Schneider ? ? +Jacob Hallen ? ? +Laura Creighton ? ? +Hakan Ardo ? ? +Carl Friedrich Bolz ? ? +Samuele Pedroni ? ? +Anders Hammarquist ? ? +Christian Tismer ? ? +Niko Matsakis ? ? +Toby Watson ? ? +Paul deGrandis ? ? +Michael Hudson ? ? +Anders Lehmann ? ? +Niklaus Haldimann ? ? +Lene Wagner ? ? +Amaury Forgeot d'Arc ? ? +Valentino Volonghi ? ? +Boris Feigin ? ? +Andrew Thompson ? ? +Bert Freudenberg ? ? +Beatrice Duering ? ? +Richard Emslie ? ? +Johan Hahn ? ? +Stephan Diehl ? ? +Anders Chrigstroem ? ? +Eric van Riet Paap ? ? +Holger Krekel ? ? +Guido Wesdorp ? ? +Leonardo Santagada ? ? +Alexandre Fayolle ? ? +Sylvain Thénault ? ? +==================== ============== ===================== From pypy.commits at gmail.com Mon Jan 4 05:34:03 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 02:34:03 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Finish the RPython part Message-ID: <568a4a9b.913bc20a.d29ab.ffffda62@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81540:a4175679cd80 Date: 2016-01-04 11:32 +0100 http://bitbucket.org/pypy/pypy/changeset/a4175679cd80/ Log: Finish the RPython part diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -352,6 +352,8 @@ # A thread-local that points to an object. The object stored in such # a thread-local is kept alive as long as the thread is not finished # (but only with our own GCs! it seems not to work with Boehm...) + # (also, on Windows, if you're not making a DLL but an EXE, it will + # leak the objects when a thread finishes; see threadlocal.c.) _COUNT = 1 def __init__(self, Cls, loop_invariant=False): diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -241,10 +241,13 @@ class TestUsingFramework(AbstractThreadTests): gcpolicy = 'minimark' - def test_tlref_keepalive(self): + def test_tlref_keepalive(self, no__thread=True): import weakref from rpython.config.translationoption import SUPPORT__THREAD + if not (SUPPORT__THREAD or no__thread): + py.test.skip("no __thread support here") + class FooBar(object): pass t = ThreadLocalReference(FooBar) @@ -256,6 +259,10 @@ return weakref.ref(x1) tset._dont_inline_ = True + class WrFromThread: + pass + wr_from_thread = WrFromThread() + def f(): assert t.automatic_keepalive() is True wr = tset() @@ -264,11 +271,29 @@ assert x2 is not None assert wr() is not None assert wr() is x2 + return wr + + def thread_entry_point(): + wr = f() + wr_from_thread.wr = wr + wr_from_thread.seen = True + + def main(): + wr_from_thread.seen = False + start_new_thread(thread_entry_point, ()) + wr1 = f() + time.sleep(0.5) + assert wr_from_thread.seen is True + wr2 = wr_from_thread.wr + import gc; gc.collect() # wr2() should be collected here + assert wr1() is not None # this thread, still running + assert wr2() is None # other thread, not running any more return 42 - for no__thread in (True, False): - if SUPPORT__THREAD or no__thread: - extra_options = {'no__thread': no__thread} - fn = self.getcompiled(f, [], extra_options=extra_options) - res = fn() - assert res == 42 + extra_options = {'no__thread': no__thread} + fn = self.getcompiled(main, [], extra_options=extra_options) + res = fn() + assert res == 42 + + def test_tlref_keepalive__thread(self): + self.test_tlref_keepalive(no__thread=False) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -3,12 +3,15 @@ #include #include #include -#ifndef _WIN32 -# include -#endif #include "src/threadlocal.h" +pthread_key_t pypy_threadlocal_key +#ifdef _WIN32 += TLS_OUT_OF_INDEXES +#endif +; + static struct pypy_threadlocal_s linkedlist_head = { .prev = &linkedlist_head, .next = &linkedlist_head }; @@ -51,63 +54,62 @@ tls->ready = 42; } -static void threadloc_unlink(struct pypy_threadlocal_s *tls) +static void threadloc_unlink(void *p) { - assert(tls->ready == 42); - tls->next->prev = tls->prev; - tls->prev->next = tls->next; - memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ - tls->ready = 0; + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + if (tls->ready == 42) { + tls->ready = 0; + tls->next->prev = tls->prev; + tls->prev->next = tls->next; + memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + } +#ifndef USE___THREAD + free(p); +#endif } - -/* ------------------------------------------------------------ */ -#ifdef USE___THREAD -/* ------------------------------------------------------------ */ - - -/* in this situation, we always have one full 'struct pypy_threadlocal_s' - available, managed by gcc. */ -__thread struct pypy_threadlocal_s pypy_threadlocal; +#ifdef _WIN32 +/* xxx Defines a DllMain() function. It's horrible imho: it only + works if we happen to compile a DLL (not a EXE); and of course you + get link-time errors if two files in the same DLL do the same. + There are some alternatives known, but they are horrible in other + ways (e.g. using undocumented behavior). This seems to be the + simplest, but feel free to fix if you need that. + */ +BOOL WINAPI DllMain(HINSTANCE hinstDLL, + DWORD reason_for_call, + LPVOID reserved) +{ + LPVOID p; + switch (reason_for_call) { + case DLL_THREAD_DETACH: + if (pypy_threadlocal_key != TLS_OUT_OF_INDEXES) { + p = TlsGetValue(pypy_threadlocal_key); + if (p != NULL) { + TlsSetValue(pypy_threadlocal_key, NULL); + threadloc_unlink(p); + } + } + break; + default: + break; + } + return TRUE; +} +#endif void RPython_ThreadLocals_ProgramInit(void) { - _RPy_ThreadLocals_Init(&pypy_threadlocal); -} - -char *_RPython_ThreadLocals_Build(void) -{ - RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); - _RPy_ThreadLocals_Init(&pypy_threadlocal); - return (char *)&pypy_threadlocal; -} - -void RPython_ThreadLocals_ThreadDie(void) -{ - if (pypy_threadlocal.ready == 42) - threadloc_unlink(&pypy_threadlocal); -} - - -/* ------------------------------------------------------------ */ -#else -/* ------------------------------------------------------------ */ - - -/* this is the case where the 'struct pypy_threadlocal_s' is allocated - explicitly, with malloc()/free(), and attached to (a single) thread- - local key using the API of Windows or pthread. */ - -pthread_key_t pypy_threadlocal_key; - - -void RPython_ThreadLocals_ProgramInit(void) -{ + /* Initialize the pypy_threadlocal_key, together with a destructor + that will be called every time a thread shuts down (if there is + a non-null thread-local value). This is needed even in the + case where we use '__thread' below, for the destructor. + */ #ifdef _WIN32 pypy_threadlocal_key = TlsAlloc(); if (pypy_threadlocal_key == TLS_OUT_OF_INDEXES) #else - if (pthread_key_create(&pypy_threadlocal_key, NULL) != 0) + if (pthread_key_create(&pypy_threadlocal_key, threadloc_unlink) != 0) #endif { fprintf(stderr, "Internal RPython error: " @@ -117,6 +119,45 @@ _RPython_ThreadLocals_Build(); } + +/* ------------------------------------------------------------ */ +#ifdef USE___THREAD +/* ------------------------------------------------------------ */ + + +/* in this situation, we always have one full 'struct pypy_threadlocal_s' + available, managed by gcc. */ +__thread struct pypy_threadlocal_s pypy_threadlocal; + +char *_RPython_ThreadLocals_Build(void) +{ + RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); + _RPy_ThreadLocals_Init(&pypy_threadlocal); + + /* we also set up &pypy_threadlocal as a POSIX thread-local variable, + because we need the destructor behavior. */ + pthread_setspecific(pypy_threadlocal_key, (void *)&pypy_threadlocal); + + return (char *)&pypy_threadlocal; +} + +void RPython_ThreadLocals_ThreadDie(void) +{ + pthread_setspecific(pypy_threadlocal_key, NULL); + threadloc_unlink(&pypy_threadlocal); +} + + +/* ------------------------------------------------------------ */ +#else +/* ------------------------------------------------------------ */ + + +/* this is the case where the 'struct pypy_threadlocal_s' is allocated + explicitly, with malloc()/free(), and attached to (a single) thread- + local key using the API of Windows or pthread. */ + + char *_RPython_ThreadLocals_Build(void) { void *p = malloc(sizeof(struct pypy_threadlocal_s)); @@ -135,8 +176,7 @@ void *p = _RPy_ThreadLocals_Get(); if (p != NULL) { _RPy_ThreadLocals_Set(NULL); - threadloc_unlink((struct pypy_threadlocal_s *)p); - free(p); + threadloc_unlink(p); /* includes free(p) */ } } From pypy.commits at gmail.com Mon Jan 4 05:49:38 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 02:49:38 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Can't use this way of declaring structures with MSVC Message-ID: <568a4e42.6953c20a.10e25.ffffe7fc@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81541:b0933017bcbc Date: 2016-01-04 11:48 +0100 http://bitbucket.org/pypy/pypy/changeset/b0933017bcbc/ Log: Can't use this way of declaring structures with MSVC diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -734,6 +734,8 @@ print >> f, '\tint ready;' print >> f, '\tchar *stack_end;' print >> f, '\tstruct pypy_threadlocal_s *prev, *next;' + # note: if the four fixed fields above are changed, you need + # to adapt threadlocal.c's linkedlist_head declaration too for field in fields: typename = database.gettype(field.FIELDTYPE) print >> f, '\t%s;' % cdecl(typename, field.fieldname) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -13,8 +13,10 @@ ; static struct pypy_threadlocal_s linkedlist_head = { - .prev = &linkedlist_head, - .next = &linkedlist_head }; + -1, /* ready */ + NULL, /* stack_end */ + &linkedlist_head, /* prev */ + &linkedlist_head }; /* next */ struct pypy_threadlocal_s * _RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev) From pypy.commits at gmail.com Mon Jan 4 05:55:57 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 02:55:57 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Force a 'shared' build in these tests, for Windows Message-ID: <568a4fbd.2a06c20a.83684.7e63@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81542:d05360392056 Date: 2016-01-04 11:54 +0100 http://bitbucket.org/pypy/pypy/changeset/d05360392056/ Log: Force a 'shared' build in these tests, for Windows diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -290,7 +290,7 @@ assert wr2() is None # other thread, not running any more return 42 - extra_options = {'no__thread': no__thread} + extra_options = {'no__thread': no__thread, 'shared': True} fn = self.getcompiled(main, [], extra_options=extra_options) res = fn() assert res == 42 From pypy.commits at gmail.com Mon Jan 4 06:32:53 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 04 Jan 2016 03:32:53 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Fix year. Message-ID: <568a5865.2a06c20a.83684.ffff8c78@mx.google.com> Author: Manuel Jacob Branch: extradoc Changeset: r5580:bde86d938b89 Date: 2016-01-04 12:32 +0100 http://bitbucket.org/pypy/extradoc/changeset/bde86d938b89/ Log: Fix year. diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -1,5 +1,5 @@ -People coming to the Leysin sprint Winter 2015 +People coming to the Leysin sprint Winter 2016 ================================================== People who have a ``?`` in their arrive/depart or accomodation From pypy.commits at gmail.com Mon Jan 4 07:01:21 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 04:01:21 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Clarify? Message-ID: <568a5f11.a5c9c20a.6cfe5.ffffd0bf@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5581:869fbb406d66 Date: 2016-01-04 13:01 +0100 http://bitbucket.org/pypy/extradoc/changeset/869fbb406d66/ Log: Clarify? diff --git a/sprintinfo/leysin-winter-2016/announcement.txt b/sprintinfo/leysin-winter-2016/announcement.txt --- a/sprintinfo/leysin-winter-2016/announcement.txt +++ b/sprintinfo/leysin-winter-2016/announcement.txt @@ -37,8 +37,8 @@ Exact times ----------- -I have booked the week from Saturday 20 to Saturday 27. It is however -fine if you prefer to leave only on the Sunday 28, or even stay a few +I have booked the week from Saturday 20 to Saturday 27. It is fine to +leave either the 27 or the 28, or even stay a few more days on either side. The plan is to work full days between the 21 and the 27. You are of course allowed to show up for a part of that time only, too. From pypy.commits at gmail.com Mon Jan 4 07:12:12 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 04:12:12 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Update people.txt Message-ID: <568a619c.465fc20a.c35be.6ad5@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5582:c552f310783f Date: 2016-01-04 13:11 +0100 http://bitbucket.org/pypy/extradoc/changeset/c552f310783f/ Log: Update people.txt diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -6,26 +6,31 @@ column are known to be coming but there are no details available yet from them. - ==================== ============== ======================= Name Arrive/Depart Accomodation ==================== ============== ======================= Armin Rigo private + (SEE NOTE BELOW) ==================== ============== ======================= +**NOTE:** we might have only a single double-bed room and a big room +(5-6 individual beds). I can ask if more people want smaller rooms, +and/or recommend hotels elsewhere in Leysin. Please be explicit in what +you prefer. + People on the following list were present at previous sprints: ==================== ============== ===================== Name Arrive/Depart Accomodation ==================== ============== ===================== -Maciej Fijalkowski 21-28 Ermina -Remi Meier 21-28 Ermina -Sebastian Pawlus 21-27 Ermina -Manuel Jacob 21-28 Ermina -Joan Massich 20-? Ermina -Quim Sanchez 20-? Ermina -Alexander Schremmer 21-23 Ermina +Maciej Fijalkowski ? ? +Remi Meier ? ? +Sebastian Pawlus ? ? +Manuel Jacob ? ? +Joan Massich ? ? +Quim Sanchez ? ? +Alexander Schremmer ? ? Romain Guillebert ? ? Christian Clauss ? ? Johan Råde ? ? From pypy.commits at gmail.com Mon Jan 4 07:14:40 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 04:14:40 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: precision Message-ID: <568a6230.0c2e1c0a.56608.ffffd0b5@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5583:8193ca1ad79e Date: 2016-01-04 13:14 +0100 http://bitbucket.org/pypy/extradoc/changeset/8193ca1ad79e/ Log: precision diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -16,7 +16,8 @@ **NOTE:** we might have only a single double-bed room and a big room (5-6 individual beds). I can ask if more people want smaller rooms, and/or recommend hotels elsewhere in Leysin. Please be explicit in what -you prefer. +you prefer. The standard booking is for the nights from Saturday to +Saturday, but it is possible to extend that. People on the following list were present at previous sprints: From pypy.commits at gmail.com Mon Jan 4 07:33:38 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 04 Jan 2016 04:33:38 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: me -> to the people.txt list for the leysin sprint Message-ID: <568a66a2.cb571c0a.11403.7ac8@mx.google.com> Author: Richard Plangger Branch: extradoc Changeset: r5584:6e287dfcc4ab Date: 2016-01-04 13:33 +0100 http://bitbucket.org/pypy/extradoc/changeset/6e287dfcc4ab/ Log: me -> to the people.txt list for the leysin sprint diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -68,4 +68,5 @@ Leonardo Santagada ? ? Alexandre Fayolle ? ? Sylvain Thénault ? ? +Richard Plangger ? ? ==================== ============== ===================== From pypy.commits at gmail.com Mon Jan 4 07:37:45 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 04 Jan 2016 04:37:45 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: ups, did not read the line above, removed me from the list of prev. attendees Message-ID: <568a6799.10581c0a.af20e.1312@mx.google.com> Author: Richard Plangger Branch: extradoc Changeset: r5585:75f174fab763 Date: 2016-01-04 13:37 +0100 http://bitbucket.org/pypy/extradoc/changeset/75f174fab763/ Log: ups, did not read the line above, removed me from the list of prev. attendees diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -68,5 +68,4 @@ Leonardo Santagada ? ? Alexandre Fayolle ? ? Sylvain Thénault ? ? -Richard Plangger ? ? ==================== ============== ===================== From pypy.commits at gmail.com Mon Jan 4 08:50:05 2016 From: pypy.commits at gmail.com (Raemi) Date: Mon, 04 Jan 2016 05:50:05 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: register for Leysin sprint Message-ID: <568a788d.01941c0a.aecc6.3db6@mx.google.com> Author: Remi Meier Branch: extradoc Changeset: r5586:b10f2a7b55f2 Date: 2016-01-04 14:49 +0100 http://bitbucket.org/pypy/extradoc/changeset/b10f2a7b55f2/ Log: register for Leysin sprint diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -11,6 +11,7 @@ ==================== ============== ======================= Armin Rigo private (SEE NOTE BELOW) +Remi Meier 21-27 Ermina ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room From pypy.commits at gmail.com Mon Jan 4 10:41:25 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 07:41:25 -0800 (PST) Subject: [pypy-commit] pypy default: Test and fix Message-ID: <568a92a5.42b81c0a.90f79.3034@mx.google.com> Author: Armin Rigo Branch: Changeset: r81543:4b35b2c07181 Date: 2016-01-04 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/4b35b2c07181/ Log: Test and fix diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -99,9 +99,10 @@ self.readonly = buffer.readonly if isinstance(buffer, SubBuffer): # don't nest them # we want a view (offset, size) over a view - # (buffer.offset, buffer.size) over buffer.buffer - at_most = buffer.size - offset - if size > at_most: + # (buffer.offset, buffer.size) over buffer.buffer. + # Note that either '.size' can be -1 to mean 'up to the end'. + at_most = buffer.getlength() - offset + if size > at_most or size < 0: if at_most < 0: at_most = 0 size = at_most diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -45,6 +45,19 @@ ssbuf = SubBuffer(sbuf, 3, 2) assert ssbuf.getslice(0, 2, 1, 2) == 'ld' assert ssbuf.as_str_and_offset_maybe() == ('hello world', 9) + # + ss2buf = SubBuffer(sbuf, 1, -1) + assert ss2buf.as_str() == 'orld' + assert ss2buf.getlength() == 4 + ss3buf = SubBuffer(ss2buf, 1, -1) + assert ss3buf.as_str() == 'rld' + assert ss3buf.getlength() == 3 + # + ss4buf = SubBuffer(buf, 3, 4) + assert ss4buf.as_str() == 'lo w' + ss5buf = SubBuffer(ss4buf, 1, -1) + assert ss5buf.as_str() == 'o w' + assert ss5buf.getlength() == 3 def test_repeated_subbuffer(): buf = StringBuffer('x' * 10000) From pypy.commits at gmail.com Mon Jan 4 10:42:10 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 07:42:10 -0800 (PST) Subject: [pypy-commit] pypy default: Update the date Message-ID: <568a92d2.53ad1c0a.cd9fe.ffffd3d1@mx.google.com> Author: Armin Rigo Branch: Changeset: r81544:012d2d144451 Date: 2016-01-04 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/012d2d144451/ Log: Update the date diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at From pypy.commits at gmail.com Mon Jan 4 10:47:14 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 07:47:14 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Progress, see comments Message-ID: <568a9402.a3f6c20a.12549.fffffbc5@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81545:7149d01e69d0 Date: 2016-01-04 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/7149d01e69d0/ Log: Progress, see comments diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -27,7 +27,7 @@ from pypy.module.thread import gil MixedModule.__init__(self, space, *args) prev_ec = space.threadlocals.get_ec() - space.threadlocals = gil.GILThreadLocals() + space.threadlocals = gil.GILThreadLocals(space) space.threadlocals.initialize(space) if prev_ec is not None: space.threadlocals._set_ec(prev_ec) diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -65,7 +65,7 @@ except Exception, e: assert 0 thread.gc_thread_die() - my_gil_threadlocals = gil.GILThreadLocals() + my_gil_threadlocals = gil.GILThreadLocals(space) def f(): state.data = [] state.datalen1 = 0 diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,5 +1,6 @@ -from rpython.rlib import rthread +from rpython.rlib import rthread, rweaklist from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,15 +14,53 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - def __init__(self): + _next_generation = r_ulonglong(0) + + def __init__(self, space): "NOT_RPYTHON" - self._valuedict = {} # {thread_ident: ExecutionContext()} + # + # This object tracks code that enters and leaves threads. + # There are two APIs. For Python-level threads, we know when + # the thread starts and ends, and we call enter_thread() and + # leave_thread(). In a few other cases, like callbacks, we + # might be running in some never-seen-before thread: in this + # case, the callback logic needs to call try_enter_thread() at + # the start, and if this returns True it needs to call + # leave_thread() at the end. + # + # We implement an optimization for the second case (which only + # works if we translate with a framework GC and with + # rweakref). If try_enter_thread() is called in a + # never-seen-before thread, it still returns False and + # remembers the ExecutionContext with 'self._weaklist'. The + # next time we call try_enter_thread() again in the same + # thread, the ExecutionContext is reused. The optimization is + # not completely invisible to the user: 'thread._local()' + # values will remain. We can argue that it is the correct + # behavior to do that, and the behavior we get if the + # optimization is disabled is buggy (but hard to do better + # then). + # + # 'self._valuedict' is a dict mapping the thread idents to + # ExecutionContexts; it does not list the ExecutionContexts + # which are in 'self._weaklist'. (The latter is more precisely + # a list of AutoFreeECWrapper objects, defined below, which + # each references the ExecutionContext.) + # + self.space = space + self._valuedict = {} self._cleanup_() self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, loop_invariant=True) + def can_optimize_with_weaklist(self): + config = self.space.config + return (config.translation.rweakref and + rthread.ThreadLocalReference.automatic_keepalive(config)) + def _cleanup_(self): self._valuedict.clear() + self._weaklist = None self._mainthreadident = 0 def enter_thread(self, space): @@ -29,19 +68,36 @@ self._set_ec(space.createexecutioncontext()) def try_enter_thread(self, space): - if rthread.get_ident() in self._valuedict: + # common case: the thread-local has already got a value + if self.raw_thread_local.get() is not None: return False - self.enter_thread(space) - return True - def _set_ec(self, ec): + # Else, make and attach a new ExecutionContext + ec = space.createexecutioncontext() + if not self.can_optimize_with_weaklist(): + self._set_ec(ec) + return True + + # If can_optimize_with_weaklist(), then 'rthread' keeps the + # thread-local values alive until the end of the thread. Use + # AutoFreeECWrapper as an object with a __del__; when this + # __del__ is called, it means the thread was really finished. + # In this case we don't want leave_thread() to be called + # explicitly, so we return False. + if self._weaklist is None: + self._weaklist = ListECWrappers() + self._weaklist.initialize() + self._weaklist.add_handle(AutoFreeECWrapper(self, ec)) + self._set_ec(ec, register_in_valuedict=False) + return False + + def _set_ec(self, ec, register_in_valuedict=True): ident = rthread.get_ident() if self._mainthreadident == 0 or self._mainthreadident == ident: ec._signals_enabled = 1 # the main thread is enabled self._mainthreadident = ident - self._valuedict[ident] = ec - # This logic relies on hacks and _make_sure_does_not_move(). - # It only works because we keep the 'ec' alive in '_valuedict' too. + if register_in_valuedict: + self._valuedict[ident] = ec self.raw_thread_local.set(ec) def leave_thread(self, space): @@ -84,7 +140,27 @@ ec._signals_enabled = new def getallvalues(self): - return self._valuedict + if self._weaklist is None: + return self._valuedict + # This logic walks the 'self._weaklist' list and adds the + # ExecutionContexts to 'result'. We are careful in case there + # are two AutoFreeECWrappers in the list which have the same + # 'ident'; in this case we must keep the most recent one (the + # older one should be deleted soon). Moreover, entries in + # self._valuedict have priority because they are never + # outdated. + result = {} + generations = {} + for h in self._weaklist.get_all_handles(): + wrapper = h() + if wrapper is not None: + key = wrapper.ident + prev = generations.get(key, r_ulonglong(0)) + if wrapper.generation > prev: # implies '.generation != 0' + generations[key] = wrapper.generation + result[key] = wrapper.ec + result.update(self._valuedict) + return result def reinit_threads(self, space): "Called in the child process after a fork()" @@ -98,3 +174,27 @@ self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig + + +class AutoFreeECWrapper(object): + + def __init__(self, threadlocals, ec): + # this makes a loop between 'self' and 'ec'. It should not prevent + # the __del__ method here from being called. + threadlocals._next_generation += 1 + self.generation = threadlocals._next_generation + self.ec = ec + ec._threadlocals_auto_free = self + self.ident = rthread.get_ident() + + def __del__(self): + from pypy.module.thread.os_local import thread_is_stopping + # this is always called in another thread: the thread + # referenced by 'self.ec' has finished at that point, and + # we're just after the GC which finds no more references to + # 'ec' (and thus to 'self'). + self.generation = r_ulonglong(0) + thread_is_stopping(self.ec) + +class ListECWrappers(rweaklist.RWeakListMixin): + pass diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -391,7 +391,6 @@ self.get = get self.set = set - self.automatic_keepalive = _automatic_keepalive def _trace_tlref(gc, obj, callback, arg): p = llmemory.NULL @@ -404,16 +403,13 @@ TRACETLREF = lltype.GcStruct('TRACETLREF') _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) - -def _automatic_keepalive(): - """Returns True if translated with a GC that keeps alive - the set() value until the end of the thread. Returns False - if you need to keep it alive yourself. - """ - from rpython.rlib import objectmodel - config = objectmodel.fetch_translated_config() - return (config is not None and - config.translation.gctransformer == "framework") + @staticmethod + def automatic_keepalive(config): + """Returns True if translated with a GC that keeps alive + the set() value until the end of the thread. Returns False + if you need to keep it alive yourself. + """ + return config.translation.gctransformer == "framework" tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -1,6 +1,7 @@ import gc, time from rpython.rlib.rthread import * from rpython.rlib.rarithmetic import r_longlong +from rpython.rlib import objectmodel from rpython.translator.c.test.test_boehm import AbstractGCTestClass from rpython.rtyper.lltypesystem import lltype, rffi import py @@ -251,7 +252,6 @@ class FooBar(object): pass t = ThreadLocalReference(FooBar) - assert t.automatic_keepalive() is False def tset(): x1 = FooBar() @@ -264,7 +264,8 @@ wr_from_thread = WrFromThread() def f(): - assert t.automatic_keepalive() is True + config = objectmodel.fetch_translated_config() + assert t.automatic_keepalive(config) is True wr = tset() import gc; gc.collect() # 'x1' should not be collected x2 = t.get() From pypy.commits at gmail.com Mon Jan 4 10:53:54 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 07:53:54 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: hg merge ec-keepalive Message-ID: <568a9592.cf821c0a.e2c36.ffffa717@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81546:cd096cdf82e1 Date: 2016-01-04 16:52 +0100 http://bitbucket.org/pypy/pypy/changeset/cd096cdf82e1/ Log: hg merge ec-keepalive diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,8 @@ .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 +Fixed ``_PyLong_FromByteArray()``, which was buggy. + .. branch: numpy-1.10 Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -228,26 +228,11 @@ def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - - result = rbigint() - negative = False - - for i in range(0, n): - if little_endian: - c = intmask(bytes[i]) - else: - c = intmask(bytes[n - i - 1]) - if i == 0 and signed and c & 0x80: - negative = True - if negative: - c = c ^ 0xFF - digit = rbigint.fromint(c) - - result = result.lshift(8) - result = result.add(digit) - - if negative: - result = result.neg() - + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, bytes), + rffi.cast(lltype.Signed, n)) + if little_endian: + byteorder = 'little' + else: + byteorder = 'big' + result = rbigint.frombytes(s, byteorder, signed != 0) return space.newlong_from_rbigint(result) - diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,8 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - CANNOT_FAIL) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -387,7 +386,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - error=CANNOT_FAIL, external=True) + external=True) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -175,10 +175,26 @@ little_endian, is_signed); """), ]) - assert module.from_bytearray(True, False) == 0x9ABC - assert module.from_bytearray(True, True) == -0x6543 - assert module.from_bytearray(False, False) == 0xBC9A - assert module.from_bytearray(False, True) == -0x4365 + assert module.from_bytearray(True, False) == 0xBC9A + assert module.from_bytearray(True, True) == -0x4366 + assert module.from_bytearray(False, False) == 0x9ABC + assert module.from_bytearray(False, True) == -0x6544 + + def test_frombytearray_2(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x41BC9A + assert module.from_bytearray(True, True) == 0x41BC9A + assert module.from_bytearray(False, False) == 0x9ABC41 + assert module.from_bytearray(False, True) == -0x6543BF def test_fromunicode(self): module = self.import_extension('foo', [ diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -414,15 +414,26 @@ return NULL; } PyObject *name = PyString_FromString("attr1"); - PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); - if (attr1->ob_ival != value->ob_ival) + PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, "tp_getattro returned wrong value"); return NULL; } Py_DECREF(name); - Py_DECREF(attr1); + Py_DECREF(attr); + name = PyString_FromString("attr2"); + attr = obj->ob_type->tp_getattro(obj, name); + if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) + { + PyErr_Clear(); + } else { + PyErr_SetString(PyExc_ValueError, + "tp_getattro should have raised"); + return NULL; + } + Py_DECREF(name); Py_RETURN_TRUE; ''' ) @@ -637,7 +648,7 @@ IntLikeObject *intObj; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; @@ -657,7 +668,7 @@ IntLikeObjectNoOp *intObjNoOp; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -45,7 +45,7 @@ # the newstr and the strsetitems are because the string is forced, # which is in turn because the optimizer doesn't know how to handle a - # getarrayitem_gc_i on a virtual string. It could be improved, but it + # gc_load_indexed_i on a virtual string. It could be improved, but it # is also true that in real life cases struct.unpack is called on # strings which come from the outside, so it's a minor issue. assert loop.match_by_id("unpack", """ @@ -55,17 +55,17 @@ strsetitem(p88, 1, i14) strsetitem(p88, 2, i17) strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + i91 = gc_load_indexed_i(p88, 0, 1, _, -4) """) def test_struct_object(self): def main(n): import struct - s = struct.Struct("i") + s = struct.Struct("ii") i = 1 while i < n: - buf = s.pack(i) # ID: pack - x = s.unpack(buf)[0] # ID: unpack + buf = s.pack(-1, i) # ID: pack + x = s.unpack(buf)[1] # ID: unpack i += x / i return i @@ -88,10 +88,15 @@ assert loop.match_by_id('unpack', """ # struct.unpack - p88 = newstr(4) - strsetitem(p88, 0, i11) - strsetitem(p88, 1, i14) - strsetitem(p88, 2, i17) - strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + p88 = newstr(8) + strsetitem(p88, 0, 255) + strsetitem(p88, 1, 255) + strsetitem(p88, 2, 255) + strsetitem(p88, 3, 255) + strsetitem(p88, 4, i11) + strsetitem(p88, 5, i14) + strsetitem(p88, 6, i17) + strsetitem(p88, 7, i20) + i90 = gc_load_indexed_i(p88, 0, 1, _, -4) + i91 = gc_load_indexed_i(p88, 4, 1, _, -4) """) diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -27,7 +27,7 @@ from pypy.module.thread import gil MixedModule.__init__(self, space, *args) prev_ec = space.threadlocals.get_ec() - space.threadlocals = gil.GILThreadLocals() + space.threadlocals = gil.GILThreadLocals(space) space.threadlocals.initialize(space) if prev_ec is not None: space.threadlocals._set_ec(prev_ec) diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -65,7 +65,7 @@ except Exception, e: assert 0 thread.gc_thread_die() - my_gil_threadlocals = gil.GILThreadLocals() + my_gil_threadlocals = gil.GILThreadLocals(space) def f(): state.data = [] state.datalen1 = 0 diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,5 +1,6 @@ -from rpython.rlib import rthread +from rpython.rlib import rthread, rweaklist from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,15 +14,53 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - def __init__(self): + _next_generation = r_ulonglong(0) + + def __init__(self, space): "NOT_RPYTHON" - self._valuedict = {} # {thread_ident: ExecutionContext()} + # + # This object tracks code that enters and leaves threads. + # There are two APIs. For Python-level threads, we know when + # the thread starts and ends, and we call enter_thread() and + # leave_thread(). In a few other cases, like callbacks, we + # might be running in some never-seen-before thread: in this + # case, the callback logic needs to call try_enter_thread() at + # the start, and if this returns True it needs to call + # leave_thread() at the end. + # + # We implement an optimization for the second case (which only + # works if we translate with a framework GC and with + # rweakref). If try_enter_thread() is called in a + # never-seen-before thread, it still returns False and + # remembers the ExecutionContext with 'self._weaklist'. The + # next time we call try_enter_thread() again in the same + # thread, the ExecutionContext is reused. The optimization is + # not completely invisible to the user: 'thread._local()' + # values will remain. We can argue that it is the correct + # behavior to do that, and the behavior we get if the + # optimization is disabled is buggy (but hard to do better + # then). + # + # 'self._valuedict' is a dict mapping the thread idents to + # ExecutionContexts; it does not list the ExecutionContexts + # which are in 'self._weaklist'. (The latter is more precisely + # a list of AutoFreeECWrapper objects, defined below, which + # each references the ExecutionContext.) + # + self.space = space + self._valuedict = {} self._cleanup_() self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, loop_invariant=True) + def can_optimize_with_weaklist(self): + config = self.space.config + return (config.translation.rweakref and + rthread.ThreadLocalReference.automatic_keepalive(config)) + def _cleanup_(self): self._valuedict.clear() + self._weaklist = None self._mainthreadident = 0 def enter_thread(self, space): @@ -29,19 +68,36 @@ self._set_ec(space.createexecutioncontext()) def try_enter_thread(self, space): - if rthread.get_ident() in self._valuedict: + # common case: the thread-local has already got a value + if self.raw_thread_local.get() is not None: return False - self.enter_thread(space) - return True - def _set_ec(self, ec): + # Else, make and attach a new ExecutionContext + ec = space.createexecutioncontext() + if not self.can_optimize_with_weaklist(): + self._set_ec(ec) + return True + + # If can_optimize_with_weaklist(), then 'rthread' keeps the + # thread-local values alive until the end of the thread. Use + # AutoFreeECWrapper as an object with a __del__; when this + # __del__ is called, it means the thread was really finished. + # In this case we don't want leave_thread() to be called + # explicitly, so we return False. + if self._weaklist is None: + self._weaklist = ListECWrappers() + self._weaklist.initialize() + self._weaklist.add_handle(AutoFreeECWrapper(self, ec)) + self._set_ec(ec, register_in_valuedict=False) + return False + + def _set_ec(self, ec, register_in_valuedict=True): ident = rthread.get_ident() if self._mainthreadident == 0 or self._mainthreadident == ident: ec._signals_enabled = 1 # the main thread is enabled self._mainthreadident = ident - self._valuedict[ident] = ec - # This logic relies on hacks and _make_sure_does_not_move(). - # It only works because we keep the 'ec' alive in '_valuedict' too. + if register_in_valuedict: + self._valuedict[ident] = ec self.raw_thread_local.set(ec) def leave_thread(self, space): @@ -84,7 +140,27 @@ ec._signals_enabled = new def getallvalues(self): - return self._valuedict + if self._weaklist is None: + return self._valuedict + # This logic walks the 'self._weaklist' list and adds the + # ExecutionContexts to 'result'. We are careful in case there + # are two AutoFreeECWrappers in the list which have the same + # 'ident'; in this case we must keep the most recent one (the + # older one should be deleted soon). Moreover, entries in + # self._valuedict have priority because they are never + # outdated. + result = {} + generations = {} + for h in self._weaklist.get_all_handles(): + wrapper = h() + if wrapper is not None: + key = wrapper.ident + prev = generations.get(key, r_ulonglong(0)) + if wrapper.generation > prev: # implies '.generation != 0' + generations[key] = wrapper.generation + result[key] = wrapper.ec + result.update(self._valuedict) + return result def reinit_threads(self, space): "Called in the child process after a fork()" @@ -94,7 +170,31 @@ old_sig = ec._signals_enabled if ident != self._mainthreadident: old_sig += 1 - self._cleanup_() + self._cleanup_() # clears self._valuedict self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig + + +class AutoFreeECWrapper(object): + + def __init__(self, threadlocals, ec): + # this makes a loop between 'self' and 'ec'. It should not prevent + # the __del__ method here from being called. + threadlocals._next_generation += 1 + self.generation = threadlocals._next_generation + self.ec = ec + ec._threadlocals_auto_free = self + self.ident = rthread.get_ident() + + def __del__(self): + from pypy.module.thread.os_local import thread_is_stopping + # this is always called in another thread: the thread + # referenced by 'self.ec' has finished at that point, and + # we're just after the GC which finds no more references to + # 'ec' (and thus to 'self'). + self.generation = r_ulonglong(0) + thread_is_stopping(self.ec) + +class ListECWrappers(rweaklist.RWeakListMixin): + pass diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper import rclass from rpython.rlib.clibffi import FFI_DEFAULT_ABI @@ -638,18 +639,9 @@ return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): + a = support.cast_arg(lltype.Ptr(descr.A), a) + array = a._obj assert index >= 0 - if descr.A is descr.OUTERA: - a = support.cast_arg(lltype.Ptr(descr.A), a) - else: - # we use rffi.cast instead of support.cast_arg because the types - # might not be "compatible" enough from the lltype point of - # view. In particular, this happens when we use - # str_storage_getitem, in which an rpy_string is casted to - # rpy_string_as_Signed (or similar) - a = rffi.cast(lltype.Ptr(descr.OUTERA), a) - a = getattr(a, descr.OUTERA._arrayfld) - array = a._obj return support.cast_result(descr.A.OF, array.getitem(index)) bh_getarrayitem_gc_pure_i = bh_getarrayitem_gc @@ -714,6 +706,24 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def bh_gc_load_indexed_i(self, struct, index, scale, base_ofs, bytes): + if bytes == 1: T = rffi.UCHAR + elif bytes == 2: T = rffi.USHORT + elif bytes == 4: T = rffi.UINT + elif bytes == 8: T = rffi.ULONGLONG + elif bytes == -1: T = rffi.SIGNEDCHAR + elif bytes == -2: T = rffi.SHORT + elif bytes == -4: T = rffi.INT + elif bytes == -8: T = rffi.LONGLONG + else: raise NotImplementedError(bytes) + x = llop.gc_load_indexed(T, struct, index, scale, base_ofs) + return lltype.cast_primitive(lltype.Signed, x) + + def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): + if bytes != 8: + raise Exception("gc_load_indexed_f is only for 'double'!") + return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale, base_ofs) + def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) p[0] += 1 diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -725,6 +725,16 @@ def bh_raw_load_f(self, addr, offset, descr): return self.read_float_at_mem(addr, offset) + def bh_gc_load_indexed_i(self, addr, index, scale, base_ofs, bytes): + offset = base_ofs + scale * index + return self.read_int_at_mem(addr, offset, abs(bytes), bytes < 0) + + def bh_gc_load_indexed_f(self, addr, index, scale, base_ofs, bytes): + # only for 'double'! + assert bytes == rffi.sizeof(lltype.Float) + offset = base_ofs + scale * index + return self.read_float_at_mem(addr, offset) + def bh_new(self, sizedescr): return self.gc_ll_descr.gc_malloc(sizedescr) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1021,18 +1021,20 @@ kind = getkind(op.result.concretetype)[0] return SpaceOperation('getinteriorfield_gc_%s' % kind, args, op.result) - elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): - # special-case 2: GcStruct with Array field - v_inst, c_field, v_index = op.args - STRUCT = v_inst.concretetype.TO - ARRAY = getattr(STRUCT, c_field.value) - assert isinstance(ARRAY, lltype.Array) - arraydescr = self.cpu.arraydescrof(STRUCT) - kind = getkind(op.result.concretetype)[0] - assert kind in ('i', 'f') - return SpaceOperation('getarrayitem_gc_%s' % kind, - [op.args[0], v_index, arraydescr], - op.result) + #elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): + # # special-case 2: GcStruct with Array field + # ---was added in the faster-rstruct branch,--- + # ---no longer directly supported--- + # v_inst, c_field, v_index = op.args + # STRUCT = v_inst.concretetype.TO + # ARRAY = getattr(STRUCT, c_field.value) + # assert isinstance(ARRAY, lltype.Array) + # arraydescr = self.cpu.arraydescrof(STRUCT) + # kind = getkind(op.result.concretetype)[0] + # assert kind in ('i', 'f') + # return SpaceOperation('getarrayitem_gc_%s' % kind, + # [op.args[0], v_index, arraydescr], + # op.result) else: assert False, 'not supported' @@ -1084,6 +1086,25 @@ return SpaceOperation('raw_load_%s' % kind, [op.args[0], op.args[1], descr], op.result) + def rewrite_op_gc_load_indexed(self, op): + T = op.result.concretetype + kind = getkind(T)[0] + assert kind != 'r' + descr = self.cpu.arraydescrof(rffi.CArray(T)) + if (not isinstance(op.args[2], Constant) or + not isinstance(op.args[3], Constant)): + raise NotImplementedError("gc_load_indexed: 'scale' and 'base_ofs'" + " should be constants") + # xxx hard-code the size in bytes at translation time, which is + # probably fine and avoids lots of issues later + bytes = descr.get_item_size_in_bytes() + if descr.is_item_signed(): + bytes = -bytes + c_bytes = Constant(bytes, lltype.Signed) + return SpaceOperation('gc_load_indexed_%s' % kind, + [op.args[0], op.args[1], + op.args[2], op.args[3], c_bytes], op.result) + def _rewrite_equality(self, op, opname): arg0, arg1 = op.args if isinstance(arg0, Constant) and not arg0.value: diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1434,6 +1434,13 @@ def bhimpl_raw_load_f(cpu, addr, offset, arraydescr): return cpu.bh_raw_load_f(addr, offset, arraydescr) + @arguments("cpu", "r", "i", "i", "i", "i", returns="i") + def bhimpl_gc_load_indexed_i(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_i(addr, index,scale,base_ofs, bytes) + @arguments("cpu", "r", "i", "i", "i", "i", returns="f") + def bhimpl_gc_load_indexed_f(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_f(addr, index,scale,base_ofs, bytes) + @arguments("r", "d", "d") def bhimpl_record_quasiimmut_field(struct, fielddescr, mutatefielddescr): pass diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -535,16 +535,10 @@ cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem it might happen that op.getarg(0) is - # a virtual string, NOT an array. In that case, we cannot cache the - # getarrayitem as if it were an array, obviously. In theory we could - # improve by writing special code to interpter the buffer of the - # virtual string as if it were an array, but it looks complicate, - # fragile and not worth it. arrayinfo = self.ensure_ptr_info_arg0(op) indexb = self.getintbound(op.getarg(1)) cf = None - if indexb.is_constant() and not arrayinfo.is_vstring(): + if indexb.is_constant(): index = indexb.getint() arrayinfo.getlenbound(None).make_gt_const(index) # use the cache on (arraydescr, index), which is a constant @@ -561,7 +555,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # the remember the result of reading the array item - if cf is not None and not arrayinfo.is_vstring(): + if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), self.get_box_replacement(op), cf, diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -24,9 +24,6 @@ def is_virtual(self): return False - def is_vstring(self): - return False - def is_precise(self): return False diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -277,10 +277,8 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem we op.getarg(0) is a string, NOT an - # array, hence the check. In that case, it will be forced opinfo = self.getptrinfo(op.getarg(0)) - if opinfo and opinfo.is_virtual() and not opinfo.is_vstring(): + if opinfo and opinfo.is_virtual(): indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: item = opinfo.getitem(op.getdescr(), indexbox.getint()) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -62,9 +62,6 @@ self.mode = mode self.length = length - def is_vstring(self): - return True - def getlenbound(self, mode): from rpython.jit.metainterp.optimizeopt import intutils diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -810,6 +810,27 @@ return self.execute_with_descr(rop.RAW_LOAD_F, arraydescr, addrbox, offsetbox) + def _remove_symbolics(self, c): + if not we_are_translated(): + from rpython.rtyper.lltypesystem import ll2ctypes + assert isinstance(c, ConstInt) + c = ConstInt(ll2ctypes.lltype2ctypes(c.value)) + return c + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_i(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_I, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_f(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_F, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + @arguments("box") def opimpl_hint_force_virtualizable(self, box): self.metainterp.gen_store_back_in_vable(box) diff --git a/rpython/jit/metainterp/test/test_strstorage.py b/rpython/jit/metainterp/test/test_strstorage.py --- a/rpython/jit/metainterp/test/test_strstorage.py +++ b/rpython/jit/metainterp/test/test_strstorage.py @@ -19,7 +19,7 @@ res = self.interp_operations(f, [], supports_singlefloats=True) # kind = getkind(TYPE)[0] # 'i' or 'f' - self.check_operations_history({'getarrayitem_gc_%s' % kind: 1, + self.check_operations_history({'gc_load_indexed_%s' % kind: 1, 'finish': 1}) # if TYPE == lltype.SingleFloat: @@ -29,8 +29,8 @@ return longlong.int2singlefloat(res) return res - def str_storage_supported(self, TYPE): - py.test.skip('this is not a JIT test') + #def str_storage_supported(self, TYPE): + # py.test.skip('this is not a JIT test') def test_force_virtual_str_storage(self): byteorder = sys.byteorder @@ -48,6 +48,6 @@ 'strsetitem': 1, # str forcing 'call_pure_r': 1, # str forcing (copystrcontent) 'guard_no_exception': 1, # str forcing - 'getarrayitem_gc_i': 1, # str_storage_getitem + 'gc_load_indexed_i': 1, # str_storage_getitem 'finish': 1 }) diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -97,6 +97,17 @@ def __init__(self, buffer, offset, size): self.readonly = buffer.readonly + if isinstance(buffer, SubBuffer): # don't nest them + # we want a view (offset, size) over a view + # (buffer.offset, buffer.size) over buffer.buffer + at_most = buffer.size - offset + if size > at_most: + if at_most < 0: + at_most = 0 + size = at_most + offset += buffer.offset + buffer = buffer.buffer + # self.buffer = buffer self.offset = offset self.size = size diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -335,6 +335,25 @@ # XXX this can be made more efficient in the future return bytearray(str(i)) +def fetch_translated_config(): + """Returns the config that is current when translating. + Returns None if not translated. + """ + return None + +class Entry(ExtRegistryEntry): + _about_ = fetch_translated_config + + def compute_result_annotation(self): + config = self.bookkeeper.annotator.translator.config + return self.bookkeeper.immutablevalue(config) + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + translator = hop.rtyper.annotator.translator + hop.exception_cannot_occur() + return hop.inputconst(lltype.Void, translator.config) + # ____________________________________________________________ class FREED_OBJECT(object): diff --git a/rpython/rlib/rstruct/nativefmttable.py b/rpython/rlib/rstruct/nativefmttable.py --- a/rpython/rlib/rstruct/nativefmttable.py +++ b/rpython/rlib/rstruct/nativefmttable.py @@ -11,7 +11,6 @@ from rpython.rlib.rstruct.standardfmttable import native_is_bigendian from rpython.rlib.rstruct.error import StructError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rstruct/standardfmttable.py b/rpython/rlib/rstruct/standardfmttable.py --- a/rpython/rlib/rstruct/standardfmttable.py +++ b/rpython/rlib/rstruct/standardfmttable.py @@ -12,7 +12,7 @@ from rpython.rlib.rstruct import ieee from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported +from rpython.rlib.strstorage import str_storage_getitem from rpython.rlib import rarithmetic from rpython.rtyper.lltypesystem import rffi @@ -185,13 +185,14 @@ data = fmtiter.read(size) fmtiter.appendobj(ieee.unpack_float(data, fmtiter.bigendian)) return - if not str_storage_supported(TYPE): - # this happens e.g. on win32 and ARM32: we cannot read the string - # content as an array of doubles because it's not properly - # aligned. But we can read a longlong and convert to float - assert TYPE == rffi.DOUBLE - assert rffi.sizeof(TYPE) == 8 - return unpack_longlong2float(fmtiter) + ## XXX check if the following code is still needed + ## if not str_storage_supported(TYPE): + ## # this happens e.g. on win32 and ARM32: we cannot read the string + ## # content as an array of doubles because it's not properly + ## # aligned. But we can read a longlong and convert to float + ## assert TYPE == rffi.DOUBLE + ## assert rffi.sizeof(TYPE) == 8 + ## return unpack_longlong2float(fmtiter) try: # fast path val = unpack_fastpath(TYPE)(fmtiter) @@ -246,7 +247,7 @@ @specialize.argtype(0) def unpack_int_fastpath_maybe(fmtiter): - if fmtiter.bigendian != native_is_bigendian or not str_storage_supported(TYPE): + if fmtiter.bigendian != native_is_bigendian or not native_is_ieee754: ## or not str_storage_supported(TYPE): return False try: intvalue = unpack_fastpath(TYPE)(fmtiter) diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -291,8 +291,6 @@ # ____________________________________________________________ # # Thread-locals. -# KEEP THE REFERENCE ALIVE, THE GC DOES NOT FOLLOW THEM SO FAR! -# We use _make_sure_does_not_move() to make sure the pointer will not move. class ThreadLocalField(object): @@ -351,6 +349,11 @@ class ThreadLocalReference(ThreadLocalField): + # A thread-local that points to an object. The object stored in such + # a thread-local is kept alive as long as the thread is not finished + # (but only with our own GCs! it seems not to work with Boehm...) + # (also, on Windows, if you're not making a DLL but an EXE, it will + # leak the objects when a thread finishes; see threadlocal.c.) _COUNT = 1 def __init__(self, Cls, loop_invariant=False): @@ -378,20 +381,36 @@ assert isinstance(value, Cls) or value is None if we_are_translated(): from rpython.rtyper.annlowlevel import cast_instance_to_gcref - from rpython.rlib.rgc import _make_sure_does_not_move - from rpython.rlib.objectmodel import running_on_llinterp gcref = cast_instance_to_gcref(value) - if not running_on_llinterp: - if gcref: - _make_sure_does_not_move(gcref) value = lltype.cast_ptr_to_int(gcref) setraw(value) + rgc.register_custom_trace_hook(TRACETLREF, _lambda_trace_tlref) + rgc.ll_writebarrier(_tracetlref_obj) else: self.local.value = value self.get = get self.set = set + def _trace_tlref(gc, obj, callback, arg): + p = llmemory.NULL + while True: + p = llop.threadlocalref_enum(llmemory.Address, p) + if not p: + break + gc._trace_callback(callback, arg, p + offset) + _lambda_trace_tlref = lambda: _trace_tlref + TRACETLREF = lltype.GcStruct('TRACETLREF') + _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) + + @staticmethod + def automatic_keepalive(config): + """Returns True if translated with a GC that keeps alive + the set() value until the end of the thread. Returns False + if you need to keep it alive yourself. + """ + return config.translation.gctransformer == "framework" + tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", loop_invariant=True) diff --git a/rpython/rlib/strstorage.py b/rpython/rlib/strstorage.py --- a/rpython/rlib/strstorage.py +++ b/rpython/rlib/strstorage.py @@ -9,54 +9,31 @@ # rstr.py:copy_string_contents), which has no chance to work during # tracing # -# 2. use llop.raw_load: despite the name, llop.raw_load DOES support reading -# from GC pointers. However: -# -# a. we would like to use a CompositeOffset as the offset (using the -# same logic as in rstr.py:_get_raw_str_buf), but this is not (yet) -# supported before translation: it works only if you pass an actual -# integer -# -# b. raw_load from a GC pointer is not (yet) supported by the -# JIT. There are plans to introduce a gc_load operation: when it -# will be there, we could fix the issue above and actually use it to -# implement str_storage_getitem -# -# 3. the actual solution: cast rpy_string to a GcStruct which has the very +# 2. cast rpy_string to a GcStruct which has the very # same layout, with the only difference that its 'chars' field is no # longer an Array(Char) but e.e. an Array(Signed). Then, we just need to -# read the appropriate index into the array +# read the appropriate index into the array. To support this solution, +# the JIT's optimizer needed a few workarounds. This was removed. +# +# 3. use the newly introduced 'llop.gc_load_indexed'. +# -from rpython.rtyper.lltypesystem import lltype, rffi, llmemory -from rpython.rtyper.lltypesystem.rstr import STR, _get_raw_str_buf + +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.annlowlevel import llstr -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize - at specialize.memo() -def _rpy_string_as_type(TP): - # sanity check that STR is actually what we think it is - assert STR._flds == { - 'hash': lltype.Signed, - 'chars': lltype.Array(lltype.Char, hints={'immutable': True}) - } - STR_AS_TP = lltype.GcStruct('rpy_string_as_%s' % TP, - ('hash', lltype.Signed), - ('chars', lltype.Array(TP, hints={'immutable': True}))) - return STR_AS_TP - - at specialize.arg(0) -def str_storage_supported(TP): - # on some architectures (e.g. win32 and arm32) an array of longlongs needs - # to be aligned at 8 bytes boundaries, so we cannot safely cast from STR - # to STR_AS_TP. In that case, we str_storage_getitem is simply not - # supported - return rffi.sizeof(TP) <= rffi.sizeof(lltype.Signed) @specialize.ll() -def str_storage_getitem(TP, s, index): - assert str_storage_supported(TP) # sanity check - STR_AS_TP = _rpy_string_as_type(TP) +def str_storage_getitem(TP, s, byte_offset): + # WARNING: the 'byte_offset' is, as its name says, measured in bytes; + # however, it should be aligned for TP, otherwise on some platforms this + # code will crash! lls = llstr(s) - str_as_tp = rffi.cast(lltype.Ptr(STR_AS_TP), lls) - index = index / rffi.sizeof(TP) - return str_as_tp.chars[index] + base_ofs = (llmemory.offsetof(STR, 'chars') + + llmemory.itemoffsetof(STR.chars, 0)) + scale_factor = llmemory.sizeof(lltype.Char) + return llop.gc_load_indexed(TP, lls, byte_offset, + scale_factor, base_ofs) diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -45,3 +45,9 @@ ssbuf = SubBuffer(sbuf, 3, 2) assert ssbuf.getslice(0, 2, 1, 2) == 'ld' assert ssbuf.as_str_and_offset_maybe() == ('hello world', 9) + +def test_repeated_subbuffer(): + buf = StringBuffer('x' * 10000) + for i in range(9999, 9, -1): + buf = SubBuffer(buf, 1, i) + assert buf.getlength() == 10 diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -6,7 +6,8 @@ prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, - setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin) + setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin, + fetch_translated_config) from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.test.test_llinterp import interpret @@ -439,6 +440,13 @@ res = self.interpret(f, [42]) assert res == 84 + def test_fetch_translated_config(self): + assert fetch_translated_config() is None + def f(): + return fetch_translated_config().translation.continuation + res = self.interpret(f, []) + assert res is False + def test_specialize_decorator(): def f(): diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -1,6 +1,7 @@ import gc, time from rpython.rlib.rthread import * from rpython.rlib.rarithmetic import r_longlong +from rpython.rlib import objectmodel from rpython.translator.c.test.test_boehm import AbstractGCTestClass from rpython.rtyper.lltypesystem import lltype, rffi import py @@ -240,3 +241,60 @@ class TestUsingFramework(AbstractThreadTests): gcpolicy = 'minimark' + + def test_tlref_keepalive(self, no__thread=True): + import weakref + from rpython.config.translationoption import SUPPORT__THREAD + + if not (SUPPORT__THREAD or no__thread): + py.test.skip("no __thread support here") + + class FooBar(object): + pass + t = ThreadLocalReference(FooBar) + + def tset(): + x1 = FooBar() + t.set(x1) + return weakref.ref(x1) + tset._dont_inline_ = True + + class WrFromThread: + pass + wr_from_thread = WrFromThread() + + def f(): + config = objectmodel.fetch_translated_config() + assert t.automatic_keepalive(config) is True + wr = tset() + import gc; gc.collect() # 'x1' should not be collected + x2 = t.get() + assert x2 is not None + assert wr() is not None + assert wr() is x2 + return wr + + def thread_entry_point(): + wr = f() + wr_from_thread.wr = wr + wr_from_thread.seen = True + + def main(): + wr_from_thread.seen = False + start_new_thread(thread_entry_point, ()) + wr1 = f() + time.sleep(0.5) + assert wr_from_thread.seen is True + wr2 = wr_from_thread.wr + import gc; gc.collect() # wr2() should be collected here + assert wr1() is not None # this thread, still running + assert wr2() is None # other thread, not running any more + return 42 + + extra_options = {'no__thread': no__thread, 'shared': True} + fn = self.getcompiled(main, [], extra_options=extra_options) + res = fn() + assert res == 42 + + def test_tlref_keepalive__thread(self): + self.test_tlref_keepalive(no__thread=False) diff --git a/rpython/rlib/test/test_strstorage.py b/rpython/rlib/test/test_strstorage.py --- a/rpython/rlib/test/test_strstorage.py +++ b/rpython/rlib/test/test_strstorage.py @@ -2,7 +2,7 @@ import sys import struct from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported +from rpython.rlib.strstorage import str_storage_getitem from rpython.rlib.rarithmetic import r_singlefloat from rpython.rtyper.test.tool import BaseRtypingTest @@ -10,14 +10,14 @@ class BaseStrStorageTest: - def test_str_getitem_supported(self): - if IS_32BIT: - expected = False - else: - expected = True - # - assert self.str_storage_supported(rffi.LONGLONG) == expected - assert self.str_storage_supported(rffi.DOUBLE) == expected + ## def test_str_getitem_supported(self): + ## if IS_32BIT: + ## expected = False + ## else: + ## expected = True + ## # + ## assert self.str_storage_supported(rffi.LONGLONG) == expected + ## assert self.str_storage_supported(rffi.DOUBLE) == expected def test_signed(self): buf = struct.pack('@ll', 42, 43) @@ -34,8 +34,8 @@ assert int(x) == 43 def test_float(self): - if not str_storage_supported(lltype.Float): - py.test.skip('str_storage_getitem(lltype.Float) not supported on this machine') + ## if not str_storage_supported(lltype.Float): + ## py.test.skip('str_storage_getitem(lltype.Float) not supported on this machine') buf = struct.pack('@dd', 12.3, 45.6) size = struct.calcsize('@d') assert self.str_storage_getitem(lltype.Float, buf, 0) == 12.3 @@ -52,20 +52,45 @@ class TestDirect(BaseStrStorageTest): - def str_storage_supported(self, TYPE): - return str_storage_supported(TYPE) + ## def str_storage_supported(self, TYPE): + ## return str_storage_supported(TYPE) def str_storage_getitem(self, TYPE, buf, offset): return str_storage_getitem(TYPE, buf, offset) class TestRTyping(BaseStrStorageTest, BaseRtypingTest): - def str_storage_supported(self, TYPE): - def fn(): - return str_storage_supported(TYPE) - return self.interpret(fn, []) + ## def str_storage_supported(self, TYPE): + ## def fn(): + ## return str_storage_supported(TYPE) + ## return self.interpret(fn, []) def str_storage_getitem(self, TYPE, buf, offset): def fn(offset): return str_storage_getitem(TYPE, buf, offset) return self.interpret(fn, [offset]) + + +class TestCompiled(BaseStrStorageTest): + cache = {} + + def str_storage_getitem(self, TYPE, buf, offset): + if TYPE not in self.cache: + from rpython.translator.c.test.test_genc import compile + + assert isinstance(TYPE, lltype.Primitive) + if TYPE in (lltype.Float, lltype.SingleFloat): + TARGET_TYPE = lltype.Float + else: + TARGET_TYPE = lltype.Signed + + def llf(buf, offset): + x = str_storage_getitem(TYPE, buf, offset) + return lltype.cast_primitive(TARGET_TYPE, x) + + fn = compile(llf, [str, int]) + self.cache[TYPE] = fn + # + fn = self.cache[TYPE] + x = fn(buf, offset) + return lltype.cast_primitive(TYPE, x) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -950,6 +950,9 @@ return self.op_raw_load(RESTYPE, _address_of_thread_local(), offset) op_threadlocalref_get.need_result_type = True + def op_threadlocalref_enum(self, prev): + raise NotImplementedError + # __________________________________________________________ # operations on addresses diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -902,6 +902,14 @@ llobj = ctypes.sizeof(get_ctypes_type(llobj.TYPE)) * llobj.repeat elif isinstance(llobj, ComputedIntSymbolic): llobj = llobj.compute_fn() + elif isinstance(llobj, llmemory.CompositeOffset): + llobj = sum([lltype2ctypes(c) for c in llobj.offsets]) + elif isinstance(llobj, llmemory.FieldOffset): + CSTRUCT = get_ctypes_type(llobj.TYPE) + llobj = getattr(CSTRUCT, llobj.fldname).offset + elif isinstance(llobj, llmemory.ArrayItemsOffset): + CARRAY = get_ctypes_type(llobj.TYPE) + llobj = CARRAY.items.offset else: raise NotImplementedError(llobj) # don't know about symbolic value diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -417,6 +417,7 @@ 'raw_load': LLOp(sideeffects=False, canrun=True), 'raw_store': LLOp(canrun=True), 'bare_raw_store': LLOp(), + 'gc_load_indexed': LLOp(sideeffects=False, canrun=True), 'stack_malloc': LLOp(), # mmh 'track_alloc_start': LLOp(), 'track_alloc_stop': LLOp(), @@ -544,8 +545,9 @@ 'getslice': LLOp(canraise=(Exception,)), 'check_and_clear_exc': LLOp(), - 'threadlocalref_addr': LLOp(sideeffects=False), # get (or make) addr of tl + 'threadlocalref_addr': LLOp(), # get (or make) addr of tl 'threadlocalref_get': LLOp(sideeffects=False), # read field (no check) + 'threadlocalref_enum': LLOp(sideeffects=False), # enum all threadlocalrefs # __________ debugging __________ 'debug_view': LLOp(), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -702,6 +702,17 @@ return p[0] op_raw_load.need_result_type = True +def op_gc_load_indexed(TVAL, p, index, scale, base_ofs): + # 'base_ofs' should be a CompositeOffset(..., ArrayItemsOffset). + # 'scale' should be a llmemory.sizeof(). + from rpython.rtyper.lltypesystem import rffi + ofs = base_ofs + scale * index + if isinstance(ofs, int): + return op_raw_load(TVAL, p, ofs) + p = rffi.cast(rffi.CArrayPtr(TVAL), llmemory.cast_ptr_to_adr(p) + ofs) + return p[0] +op_gc_load_indexed.need_result_type = True + def op_likely(x): assert isinstance(x, bool) return x diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -8,10 +8,10 @@ def mallocbytearray(size): return lltype.malloc(BYTEARRAY, size) -_, _, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, +_, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, lltype.Char, 'bytearray') -_, _, _, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, +_, _, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, BYTEARRAY, lltype.Char, 'bytearray_from_str') diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -136,15 +136,13 @@ copy_raw_to_string = func_with_new_name(copy_raw_to_string, 'copy_raw_to_%s' % name) - return _get_raw_buf, copy_string_to_raw, copy_raw_to_string, copy_string_contents + return copy_string_to_raw, copy_raw_to_string, copy_string_contents -(_get_raw_str_buf, - copy_string_to_raw, +(copy_string_to_raw, copy_raw_to_string, copy_string_contents) = _new_copy_contents_fun(STR, STR, Char, 'string') -(_get_raw_unicode_buf, - copy_unicode_to_raw, +(copy_unicode_to_raw, copy_raw_to_unicode, copy_unicode_contents) = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, 'unicode') diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -299,7 +299,7 @@ def gen_op(self, op): macro = 'OP_%s' % op.opname.upper() line = None - if op.opname.startswith('gc_'): + if op.opname.startswith('gc_') and op.opname != 'gc_load_indexed': meth = getattr(self.gcpolicy, macro, None) if meth: line = meth(self, op) @@ -709,6 +709,19 @@ "%(result)s = ((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0];" % locals()) + def OP_GC_LOAD_INDEXED(self, op): + addr = self.expr(op.args[0]) + index = self.expr(op.args[1]) + scale = self.expr(op.args[2]) + base_ofs = self.expr(op.args[3]) + result = self.expr(op.result) + TYPE = op.result.concretetype + typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '') + return ( + "%(result)s = ((%(typename)s) (((char *)%(addr)s) + " + "%(base_ofs)s + %(scale)s * %(index)s))[0];" + % locals()) + def OP_CAST_PRIMITIVE(self, op): TYPE = self.lltypemap(op.result) val = self.expr(op.args[0]) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -733,6 +733,9 @@ print >> f, 'struct pypy_threadlocal_s {' print >> f, '\tint ready;' print >> f, '\tchar *stack_end;' + print >> f, '\tstruct pypy_threadlocal_s *prev, *next;' + # note: if the four fixed fields above are changed, you need + # to adapt threadlocal.c's linkedlist_head declaration too for field in fields: typename = database.gettype(field.FIELDTYPE) print >> f, '\t%s;' % cdecl(typename, field.fieldname) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -3,20 +3,42 @@ #include #include #include -#ifndef _WIN32 -# include -#endif #include "src/threadlocal.h" +pthread_key_t pypy_threadlocal_key +#ifdef _WIN32 += TLS_OUT_OF_INDEXES +#endif +; + +static struct pypy_threadlocal_s linkedlist_head = { + -1, /* ready */ + NULL, /* stack_end */ + &linkedlist_head, /* prev */ + &linkedlist_head }; /* next */ + +struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev) +{ + if (prev == NULL) + prev = &linkedlist_head; + if (prev->next == &linkedlist_head) + return NULL; + return prev->next; +} + static void _RPy_ThreadLocals_Init(void *p) { + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + struct pypy_threadlocal_s *oldnext; memset(p, 0, sizeof(struct pypy_threadlocal_s)); + #ifdef RPY_TLOFS_p_errno - ((struct pypy_threadlocal_s *)p)->p_errno = &errno; + tls->p_errno = &errno; #endif #ifdef RPY_TLOFS_thread_ident - ((struct pypy_threadlocal_s *)p)->thread_ident = + tls->thread_ident = # ifdef _WIN32 GetCurrentThreadId(); # else @@ -26,58 +48,70 @@ where it is not the case are rather old nowadays. */ # endif #endif - ((struct pypy_threadlocal_s *)p)->ready = 42; + oldnext = linkedlist_head.next; + tls->prev = &linkedlist_head; + tls->next = oldnext; + linkedlist_head.next = tls; + oldnext->prev = tls; + tls->ready = 42; } +static void threadloc_unlink(void *p) +{ + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + if (tls->ready == 42) { + tls->ready = 0; + tls->next->prev = tls->prev; + tls->prev->next = tls->next; + memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + } +#ifndef USE___THREAD + free(p); +#endif +} -/* ------------------------------------------------------------ */ -#ifdef USE___THREAD -/* ------------------------------------------------------------ */ - - -/* in this situation, we always have one full 'struct pypy_threadlocal_s' - available, managed by gcc. */ -__thread struct pypy_threadlocal_s pypy_threadlocal; +#ifdef _WIN32 +/* xxx Defines a DllMain() function. It's horrible imho: it only + works if we happen to compile a DLL (not a EXE); and of course you + get link-time errors if two files in the same DLL do the same. + There are some alternatives known, but they are horrible in other + ways (e.g. using undocumented behavior). This seems to be the + simplest, but feel free to fix if you need that. + */ +BOOL WINAPI DllMain(HINSTANCE hinstDLL, + DWORD reason_for_call, + LPVOID reserved) +{ + LPVOID p; + switch (reason_for_call) { + case DLL_THREAD_DETACH: + if (pypy_threadlocal_key != TLS_OUT_OF_INDEXES) { + p = TlsGetValue(pypy_threadlocal_key); + if (p != NULL) { + TlsSetValue(pypy_threadlocal_key, NULL); + threadloc_unlink(p); + } + } + break; + default: + break; + } + return TRUE; +} +#endif void RPython_ThreadLocals_ProgramInit(void) { - _RPy_ThreadLocals_Init(&pypy_threadlocal); -} - -char *_RPython_ThreadLocals_Build(void) -{ - RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); - _RPy_ThreadLocals_Init(&pypy_threadlocal); - return (char *)&pypy_threadlocal; -} - -void RPython_ThreadLocals_ThreadDie(void) -{ - memset(&pypy_threadlocal, 0xDD, - sizeof(struct pypy_threadlocal_s)); /* debug */ - pypy_threadlocal.ready = 0; -} - - -/* ------------------------------------------------------------ */ -#else -/* ------------------------------------------------------------ */ - - -/* this is the case where the 'struct pypy_threadlocal_s' is allocated - explicitly, with malloc()/free(), and attached to (a single) thread- - local key using the API of Windows or pthread. */ - -pthread_key_t pypy_threadlocal_key; - - -void RPython_ThreadLocals_ProgramInit(void) -{ + /* Initialize the pypy_threadlocal_key, together with a destructor + that will be called every time a thread shuts down (if there is + a non-null thread-local value). This is needed even in the + case where we use '__thread' below, for the destructor. + */ #ifdef _WIN32 pypy_threadlocal_key = TlsAlloc(); if (pypy_threadlocal_key == TLS_OUT_OF_INDEXES) #else - if (pthread_key_create(&pypy_threadlocal_key, NULL) != 0) + if (pthread_key_create(&pypy_threadlocal_key, threadloc_unlink) != 0) #endif { fprintf(stderr, "Internal RPython error: " @@ -87,6 +121,45 @@ _RPython_ThreadLocals_Build(); } + +/* ------------------------------------------------------------ */ +#ifdef USE___THREAD +/* ------------------------------------------------------------ */ + + +/* in this situation, we always have one full 'struct pypy_threadlocal_s' + available, managed by gcc. */ +__thread struct pypy_threadlocal_s pypy_threadlocal; + +char *_RPython_ThreadLocals_Build(void) +{ + RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); + _RPy_ThreadLocals_Init(&pypy_threadlocal); + + /* we also set up &pypy_threadlocal as a POSIX thread-local variable, + because we need the destructor behavior. */ + pthread_setspecific(pypy_threadlocal_key, (void *)&pypy_threadlocal); + + return (char *)&pypy_threadlocal; +} + +void RPython_ThreadLocals_ThreadDie(void) +{ + pthread_setspecific(pypy_threadlocal_key, NULL); + threadloc_unlink(&pypy_threadlocal); +} + + +/* ------------------------------------------------------------ */ +#else +/* ------------------------------------------------------------ */ + + +/* this is the case where the 'struct pypy_threadlocal_s' is allocated + explicitly, with malloc()/free(), and attached to (a single) thread- + local key using the API of Windows or pthread. */ + + char *_RPython_ThreadLocals_Build(void) { void *p = malloc(sizeof(struct pypy_threadlocal_s)); @@ -105,8 +178,7 @@ void *p = _RPy_ThreadLocals_Get(); if (p != NULL) { _RPy_ThreadLocals_Set(NULL); - memset(p, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ - free(p); + threadloc_unlink(p); /* includes free(p) */ } } diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -13,14 +13,18 @@ to die. */ RPY_EXTERN void RPython_ThreadLocals_ThreadDie(void); -/* There are two llops: 'threadlocalref_addr' and 'threadlocalref_make'. - They both return the address of the thread-local structure (of the - C type 'struct pypy_threadlocal_s'). The difference is that - OP_THREADLOCALREF_MAKE() checks if we have initialized this thread- - local structure in the current thread, and if not, calls the following - helper. */ +/* 'threadlocalref_addr' returns the address of the thread-local + structure (of the C type 'struct pypy_threadlocal_s'). It first + checks if we have initialized this thread-local structure in the + current thread, and if not, calls the following helper. */ RPY_EXTERN char *_RPython_ThreadLocals_Build(void); +RPY_EXTERN struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev); + +#define OP_THREADLOCALREF_ENUM(p, r) \ + r = _RPython_ThreadLocals_Enum(p) + /* ------------------------------------------------------------ */ #ifdef USE___THREAD diff --git a/rpython/translator/c/test/test_boehm.py b/rpython/translator/c/test/test_boehm.py --- a/rpython/translator/c/test/test_boehm.py +++ b/rpython/translator/c/test/test_boehm.py @@ -23,6 +23,7 @@ class AbstractGCTestClass(object): gcpolicy = "boehm" use_threads = False + extra_options = {} # deal with cleanups def setup_method(self, meth): @@ -33,8 +34,10 @@ #print "CLEANUP" self._cleanups.pop()() - def getcompiled(self, func, argstypelist=[], annotatorpolicy=None): - return compile(func, argstypelist, gcpolicy=self.gcpolicy, thread=self.use_threads) + def getcompiled(self, func, argstypelist=[], annotatorpolicy=None, + extra_options={}): + return compile(func, argstypelist, gcpolicy=self.gcpolicy, + thread=self.use_threads, **extra_options) class TestUsingBoehm(AbstractGCTestClass): From pypy.commits at gmail.com Mon Jan 4 10:55:36 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 07:55:36 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: hg merge default Message-ID: <568a95f8.6650c20a.a11d6.4f18@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81547:59373f8be2eb Date: 2016-01-04 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/59373f8be2eb/ Log: hg merge default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -99,9 +99,10 @@ self.readonly = buffer.readonly if isinstance(buffer, SubBuffer): # don't nest them # we want a view (offset, size) over a view - # (buffer.offset, buffer.size) over buffer.buffer - at_most = buffer.size - offset - if size > at_most: + # (buffer.offset, buffer.size) over buffer.buffer. + # Note that either '.size' can be -1 to mean 'up to the end'. + at_most = buffer.getlength() - offset + if size > at_most or size < 0: if at_most < 0: at_most = 0 size = at_most diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -45,6 +45,19 @@ ssbuf = SubBuffer(sbuf, 3, 2) assert ssbuf.getslice(0, 2, 1, 2) == 'ld' assert ssbuf.as_str_and_offset_maybe() == ('hello world', 9) + # + ss2buf = SubBuffer(sbuf, 1, -1) + assert ss2buf.as_str() == 'orld' + assert ss2buf.getlength() == 4 + ss3buf = SubBuffer(ss2buf, 1, -1) + assert ss3buf.as_str() == 'rld' + assert ss3buf.getlength() == 3 + # + ss4buf = SubBuffer(buf, 3, 4) + assert ss4buf.as_str() == 'lo w' + ss5buf = SubBuffer(ss4buf, 1, -1) + assert ss5buf.as_str() == 'o w' + assert ss5buf.getlength() == 3 def test_repeated_subbuffer(): buf = StringBuffer('x' * 10000) From pypy.commits at gmail.com Mon Jan 4 10:55:39 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 07:55:39 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: hg merge ec-keepalive Message-ID: <568a95fb.42b81c0a.90f79.35be@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81548:f1f0ea2d424c Date: 2016-01-04 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/f1f0ea2d424c/ Log: hg merge ec-keepalive diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -99,9 +99,10 @@ self.readonly = buffer.readonly if isinstance(buffer, SubBuffer): # don't nest them # we want a view (offset, size) over a view - # (buffer.offset, buffer.size) over buffer.buffer - at_most = buffer.size - offset - if size > at_most: + # (buffer.offset, buffer.size) over buffer.buffer. + # Note that either '.size' can be -1 to mean 'up to the end'. + at_most = buffer.getlength() - offset + if size > at_most or size < 0: if at_most < 0: at_most = 0 size = at_most diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -45,6 +45,19 @@ ssbuf = SubBuffer(sbuf, 3, 2) assert ssbuf.getslice(0, 2, 1, 2) == 'ld' assert ssbuf.as_str_and_offset_maybe() == ('hello world', 9) + # + ss2buf = SubBuffer(sbuf, 1, -1) + assert ss2buf.as_str() == 'orld' + assert ss2buf.getlength() == 4 + ss3buf = SubBuffer(ss2buf, 1, -1) + assert ss3buf.as_str() == 'rld' + assert ss3buf.getlength() == 3 + # + ss4buf = SubBuffer(buf, 3, 4) + assert ss4buf.as_str() == 'lo w' + ss5buf = SubBuffer(ss4buf, 1, -1) + assert ss5buf.as_str() == 'o w' + assert ss5buf.getlength() == 3 def test_repeated_subbuffer(): buf = StringBuffer('x' * 10000) From pypy.commits at gmail.com Mon Jan 4 11:09:32 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 08:09:32 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Simplify: rshrinklist keeps the order, unlike rweaklist which keeps the Message-ID: <568a993c.6a9dc20a.4b1ff.ffffae3d@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81549:9d442fbb0338 Date: 2016-01-04 17:07 +0100 http://bitbucket.org/pypy/pypy/changeset/9d442fbb0338/ Log: Simplify: rshrinklist keeps the order, unlike rweaklist which keeps the index (in this case we don't need the latter, but the former helps) diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,4 +1,4 @@ -from rpython.rlib import rthread, rweaklist +from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error @@ -14,8 +14,6 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - _next_generation = r_ulonglong(0) - def __init__(self, space): "NOT_RPYTHON" # @@ -86,8 +84,7 @@ # explicitly, so we return False. if self._weaklist is None: self._weaklist = ListECWrappers() - self._weaklist.initialize() - self._weaklist.add_handle(AutoFreeECWrapper(self, ec)) + self._weaklist.append(AutoFreeECWrapper(ec)) self._set_ec(ec, register_in_valuedict=False) return False @@ -150,15 +147,11 @@ # self._valuedict have priority because they are never # outdated. result = {} - generations = {} - for h in self._weaklist.get_all_handles(): + for h in self._weaklist.items(): wrapper = h() - if wrapper is not None: - key = wrapper.ident - prev = generations.get(key, r_ulonglong(0)) - if wrapper.generation > prev: # implies '.generation != 0' - generations[key] = wrapper.generation - result[key] = wrapper.ec + if wrapper is not None and not wrapper.deleted: + result[wrapper.ident] = wrapper.ec + # ^^ this possibly overwrites an older ec result.update(self._valuedict) return result @@ -177,12 +170,11 @@ class AutoFreeECWrapper(object): + deleted = False - def __init__(self, threadlocals, ec): + def __init__(self, ec): # this makes a loop between 'self' and 'ec'. It should not prevent # the __del__ method here from being called. - threadlocals._next_generation += 1 - self.generation = threadlocals._next_generation self.ec = ec ec._threadlocals_auto_free = self self.ident = rthread.get_ident() @@ -193,8 +185,9 @@ # referenced by 'self.ec' has finished at that point, and # we're just after the GC which finds no more references to # 'ec' (and thus to 'self'). - self.generation = r_ulonglong(0) + self.deleted = True thread_is_stopping(self.ec) -class ListECWrappers(rweaklist.RWeakListMixin): - pass +class ListECWrappers(rshrinklist.AbstractShrinkList): + def must_keep(self, wref): + return wref() is not None From pypy.commits at gmail.com Mon Jan 4 11:26:15 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 08:26:15 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Document and cross-reference these two modules, which implement Message-ID: <568a9d27.2968c20a.6d969.ffffb4eb@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81550:52bed60ea80d Date: 2016-01-04 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/52bed60ea80d/ Log: Document and cross-reference these two modules, which implement somewhat similar but subtly different semantics. diff --git a/rpython/rlib/rshrinklist.py b/rpython/rlib/rshrinklist.py --- a/rpython/rlib/rshrinklist.py +++ b/rpython/rlib/rshrinklist.py @@ -6,6 +6,8 @@ The twist is that occasionally append() will throw away the items for which must_keep() returns False. (It does so without changing the order.) + + See also rpython.rlib.rweaklist. """ _mixin_ = True diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py --- a/rpython/rlib/rweaklist.py +++ b/rpython/rlib/rweaklist.py @@ -5,6 +5,13 @@ class RWeakListMixin(object): + """A mixin base class. A collection that weakly maps indexes to objects. + After an object goes away, its index is marked free and will be reused + by some following add_handle() call. So add_handle() might not append + the object at the end of the list, but can put it anywhere. + + See also rpython.rlib.rshrinklist. + """ _mixin_ = True def initialize(self): From pypy.commits at gmail.com Mon Jan 4 11:33:32 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 08:33:32 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: fix Message-ID: <568a9edc.e935c20a.161dc.4967@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81551:8f6b90ba52b5 Date: 2016-01-04 16:32 +0000 http://bitbucket.org/pypy/pypy/changeset/8f6b90ba52b5/ Log: fix diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,3 +1,4 @@ +import weakref from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_ulonglong @@ -84,7 +85,7 @@ # explicitly, so we return False. if self._weaklist is None: self._weaklist = ListECWrappers() - self._weaklist.append(AutoFreeECWrapper(ec)) + self._weaklist.append(weakref.ref(AutoFreeECWrapper(ec))) self._set_ec(ec, register_in_valuedict=False) return False From pypy.commits at gmail.com Mon Jan 4 11:33:34 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 08:33:34 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: hg merge ec-keepalive Message-ID: <568a9ede.8673c20a.386b4.5c2b@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81552:a7b258839a6c Date: 2016-01-04 16:33 +0000 http://bitbucket.org/pypy/pypy/changeset/a7b258839a6c/ Log: hg merge ec-keepalive diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,4 +1,5 @@ -from rpython.rlib import rthread, rweaklist +import weakref +from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error @@ -14,8 +15,6 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - _next_generation = r_ulonglong(0) - def __init__(self, space): "NOT_RPYTHON" # @@ -86,8 +85,7 @@ # explicitly, so we return False. if self._weaklist is None: self._weaklist = ListECWrappers() - self._weaklist.initialize() - self._weaklist.add_handle(AutoFreeECWrapper(self, ec)) + self._weaklist.append(weakref.ref(AutoFreeECWrapper(ec))) self._set_ec(ec, register_in_valuedict=False) return False @@ -150,15 +148,11 @@ # self._valuedict have priority because they are never # outdated. result = {} - generations = {} - for h in self._weaklist.get_all_handles(): + for h in self._weaklist.items(): wrapper = h() - if wrapper is not None: - key = wrapper.ident - prev = generations.get(key, r_ulonglong(0)) - if wrapper.generation > prev: # implies '.generation != 0' - generations[key] = wrapper.generation - result[key] = wrapper.ec + if wrapper is not None and not wrapper.deleted: + result[wrapper.ident] = wrapper.ec + # ^^ this possibly overwrites an older ec result.update(self._valuedict) return result @@ -177,12 +171,11 @@ class AutoFreeECWrapper(object): + deleted = False - def __init__(self, threadlocals, ec): + def __init__(self, ec): # this makes a loop between 'self' and 'ec'. It should not prevent # the __del__ method here from being called. - threadlocals._next_generation += 1 - self.generation = threadlocals._next_generation self.ec = ec ec._threadlocals_auto_free = self self.ident = rthread.get_ident() @@ -193,8 +186,9 @@ # referenced by 'self.ec' has finished at that point, and # we're just after the GC which finds no more references to # 'ec' (and thus to 'self'). - self.generation = r_ulonglong(0) + self.deleted = True thread_is_stopping(self.ec) -class ListECWrappers(rweaklist.RWeakListMixin): - pass +class ListECWrappers(rshrinklist.AbstractShrinkList): + def must_keep(self, wref): + return wref() is not None From pypy.commits at gmail.com Mon Jan 4 11:33:36 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 08:33:36 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: merge heads Message-ID: <568a9ee0.8a58c20a.e6950.4f08@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81553:76879a85792f Date: 2016-01-04 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/76879a85792f/ Log: merge heads diff --git a/rpython/rlib/rshrinklist.py b/rpython/rlib/rshrinklist.py --- a/rpython/rlib/rshrinklist.py +++ b/rpython/rlib/rshrinklist.py @@ -6,6 +6,8 @@ The twist is that occasionally append() will throw away the items for which must_keep() returns False. (It does so without changing the order.) + + See also rpython.rlib.rweaklist. """ _mixin_ = True diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py --- a/rpython/rlib/rweaklist.py +++ b/rpython/rlib/rweaklist.py @@ -5,6 +5,13 @@ class RWeakListMixin(object): + """A mixin base class. A collection that weakly maps indexes to objects. + After an object goes away, its index is marked free and will be reused + by some following add_handle() call. So add_handle() might not append + the object at the end of the list, but can put it anywhere. + + See also rpython.rlib.rshrinklist. + """ _mixin_ = True def initialize(self): From pypy.commits at gmail.com Mon Jan 4 11:38:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 04 Jan 2016 08:38:43 -0800 (PST) Subject: [pypy-commit] pypy exctrans: inline make_funcgens() Message-ID: <568aa013.0c2e1c0a.56608.3696@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81554:82fee982402e Date: 2016-01-03 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/82fee982402e/ Log: inline make_funcgens() diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -820,25 +820,23 @@ if (callable is not None and getattr(callable, 'c_name', None) is not None): self.name = forcename or obj._callable.c_name - elif getattr(obj, 'external', None) == 'C' and not db.need_sandboxing(obj): + elif (getattr(obj, 'external', None) == 'C' and + not db.need_sandboxing(obj)): self.name = forcename or self.basename() else: self.name = (forcename or db.namespace.uniquename('g_' + self.basename())) - self.make_funcgens() + + self.funcgen = select_function_code_generators(obj, db, self.name) + if self.funcgen: + argnames = self.funcgen.argnames() + self.implementationtypename = db.gettype(T, argnames=argnames) + self._funccodegen_owner = self.funcgen self.typename = db.gettype(T) #, who_asks=self) def getptrname(self): return self.name - def make_funcgens(self): - self.funcgen = select_function_code_generators(self.obj, self.db, self.name) - if self.funcgen: - argnames = self.funcgen.argnames() - self.implementationtypename = self.db.gettype(self.T, argnames=argnames) - - self._funccodegen_owner = self.funcgen - def basename(self): return self.obj._name From pypy.commits at gmail.com Mon Jan 4 11:38:45 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 04 Jan 2016 08:38:45 -0800 (PST) Subject: [pypy-commit] pypy exctrans: fix database tests Message-ID: <568aa015.08e11c0a.a630e.ffffde2a@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81555:0c774b365d18 Date: 2016-01-03 17:06 +0100 http://bitbucket.org/pypy/pypy/changeset/0c774b365d18/ Log: fix database tests diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -659,7 +659,8 @@ return lines def gcstructnode_factory(db, T, obj): - if db.gctransformer.get_prebuilt_hash(obj) is not None: + if (db.gctransformer and + db.gctransformer.get_prebuilt_hash(obj) is not None): cls = GcStructNodeWithHash else: cls = StructNode From pypy.commits at gmail.com Mon Jan 4 11:38:47 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 04 Jan 2016 08:38:47 -0800 (PST) Subject: [pypy-commit] pypy exctrans: move db.need_sandboxing to the only module using it Message-ID: <568aa017.6650c20a.a11d6.62c3@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81556:21a54f1585ad Date: 2016-01-03 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/21a54f1585ad/ Log: move db.need_sandboxing to the only module using it diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -378,17 +378,6 @@ produce(node) return result - def need_sandboxing(self, fnobj): - if not self.sandbox: - return False - if hasattr(fnobj, '_safe_not_sandboxed'): - return not fnobj._safe_not_sandboxed - elif getattr(getattr(fnobj, '_callable', None), - '_sandbox_external_name', None): - return True - else: - return "if_external" - def prepare_inline_helpers(self): all_nodes = self.globalcontainers() funcnodes = [node for node in all_nodes if node.nodekind == 'func'] diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -822,7 +822,7 @@ getattr(callable, 'c_name', None) is not None): self.name = forcename or obj._callable.c_name elif (getattr(obj, 'external', None) == 'C' and - not db.need_sandboxing(obj)): + (not db.sandbox or not need_sandboxing(obj))): self.name = forcename or self.basename() else: self.name = (forcename or @@ -928,8 +928,17 @@ graph = rsandbox.get_external_function_sandbox_graph(fnobj, db) return make_funcgen(graph, db) +def need_sandboxing(fnobj): + if hasattr(fnobj, '_safe_not_sandboxed'): + return not fnobj._safe_not_sandboxed + elif getattr(getattr(fnobj, '_callable', None), + '_sandbox_external_name', None): + return True + else: + return "if_external" + def select_function_code_generators(fnobj, db, functionname): - sandbox = db.need_sandboxing(fnobj) + sandbox = db.sandbox and need_sandboxing(fnobj) if hasattr(fnobj, 'graph'): if sandbox and sandbox != "if_external": # apply the sandbox transformation From pypy.commits at gmail.com Mon Jan 4 11:38:48 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 04 Jan 2016 08:38:48 -0800 (PST) Subject: [pypy-commit] pypy exctrans: extract sandboxing case in select_function_code_generators() Message-ID: <568aa018.863f1c0a.cca08.ffff8f5b@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81557:23337f13f16f Date: 2016-01-04 00:30 +0100 http://bitbucket.org/pypy/pypy/changeset/23337f13f16f/ Log: extract sandboxing case in select_function_code_generators() diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -10,6 +10,7 @@ from rpython.translator.c.primitive import PrimitiveType, name_signed from rpython.rlib import exports from rpython.rlib.rfloat import isfinite, isinf +from rpython.translator.sandbox import rsandbox def needs_gcheader(T): @@ -915,7 +916,6 @@ def sandbox_stub(fnobj, db): # unexpected external function for --sandbox translation: replace it # with a "Not Implemented" stub. - from rpython.translator.sandbox import rsandbox graph = rsandbox.get_external_function_sandbox_graph(fnobj, db, force_stub=True) return make_funcgen(graph, db) @@ -924,7 +924,6 @@ # for --sandbox: replace a function like os_open_llimpl() with # code that communicates with the external process to ask it to # perform the operation. - from rpython.translator.sandbox import rsandbox graph = rsandbox.get_external_function_sandbox_graph(fnobj, db) return make_funcgen(graph, db) @@ -939,18 +938,22 @@ def select_function_code_generators(fnobj, db, functionname): sandbox = db.sandbox and need_sandboxing(fnobj) - if hasattr(fnobj, 'graph'): - if sandbox and sandbox != "if_external": + if sandbox: + if hasattr(fnobj, 'graph') and sandbox != 'if_external': # apply the sandbox transformation return sandbox_transform(fnobj, db) + elif getattr(fnobj, 'external', None) is not None: + return sandbox_stub(fnobj, db) + if hasattr(fnobj, 'graph'): + if db.sandbox: + assert getattr(fnobj, '_safe_not_sandboxed', True) exception_policy = getattr(fnobj, 'exception_policy', None) return make_funcgen(fnobj.graph, db, exception_policy, functionname) elif getattr(fnobj, 'external', None) is not None: - if sandbox: - return sandbox_stub(fnobj, db) - else: - assert fnobj.external == 'C' - return None + assert fnobj.external == 'C' + if db.sandbox: + assert fnobj._safe_not_sandboxed + return None elif hasattr(fnobj._callable, "c_name"): return None # this case should only be used for entrypoints else: From pypy.commits at gmail.com Mon Jan 4 13:07:19 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 04 Jan 2016 10:07:19 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: add myself to list of people Message-ID: <568ab4d7.8e371c0a.e9e2b.ffff8e72@mx.google.com> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5587:843dbfd00dc7 Date: 2016-01-04 19:06 +0100 http://bitbucket.org/pypy/extradoc/changeset/843dbfd00dc7/ Log: add myself to list of people diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -12,6 +12,7 @@ Armin Rigo private (SEE NOTE BELOW) Remi Meier 21-27 Ermina +Carl Friedrich Bolz 20-27 Ermina? ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room @@ -42,7 +43,6 @@ Jacob Hallen ? ? Laura Creighton ? ? Hakan Ardo ? ? -Carl Friedrich Bolz ? ? Samuele Pedroni ? ? Anders Hammarquist ? ? Christian Tismer ? ? From pypy.commits at gmail.com Mon Jan 4 13:51:10 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 10:51:10 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: fixes Message-ID: <568abf1e.c5321c0a.26dfa.ffff9ff0@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81558:d88be6bde3d7 Date: 2016-01-04 19:49 +0100 http://bitbucket.org/pypy/pypy/changeset/d88be6bde3d7/ Log: fixes diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -33,6 +33,8 @@ /* Use the '__thread' specifier, so far only on Linux */ +#include + RPY_EXTERN __thread struct pypy_threadlocal_s pypy_threadlocal; #define OP_THREADLOCALREF_ADDR(r) \ @@ -68,8 +70,6 @@ # define _RPy_ThreadLocals_Set(x) pthread_setspecific(pypy_threadlocal_key, x) #endif -RPY_EXTERN pthread_key_t pypy_threadlocal_key; - #define OP_THREADLOCALREF_ADDR(r) \ do { \ @@ -91,6 +91,9 @@ /* ------------------------------------------------------------ */ +RPY_EXTERN pthread_key_t pypy_threadlocal_key; + + /* only for the fall-back path in the JIT */ #define OP_THREADLOCALREF_GET_NONCONST(RESTYPE, offset, r) \ do { \ From pypy.commits at gmail.com Mon Jan 4 13:51:12 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 10:51:12 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Yet Another attempt to fix rpython_startup_code(), this time writing it Message-ID: <568abf20.41dd1c0a.be2f4.ffff9c6f@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81559:5a02efb1a226 Date: 2016-01-04 19:50 +0100 http://bitbucket.org/pypy/pypy/changeset/5a02efb1a226/ Log: Yet Another attempt to fix rpython_startup_code(), this time writing it as plain C code (why wasn't it done this way already...) diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -1,4 +1,4 @@ -secondary_entrypoints = {} +secondary_entrypoints = {"main": []} import py from rpython.rtyper.lltypesystem import lltype, rffi @@ -109,17 +109,3 @@ "you. Another difference is that entrypoint_highlevel() " "returns the normal Python function, which can be safely " "called from more Python code.") - - -# the point of dance below is so the call to rpython_startup_code actually -# does call asm_stack_bottom. It's here because there is no other good place. -# This thing is imported by any target which has any API, so it'll get -# registered - -RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True, - random_effects_on_gcobjs=True) - - at entrypoint_highlevel('main', [], c_name='rpython_startup_code') -def rpython_startup_code(): - RPython_StartupCode() diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,24 @@ # include #endif +void rpython_startup_code(void) +{ +#ifdef RPY_WITH_GIL + RPyGilAcquire(); +#endif +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; +#endif + pypy_asm_stack_bottom(); + RPython_StartupCode(); +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter--; +#endif +#ifdef RPY_WITH_GIL + RPyGilRelease(); +#endif +} + RPY_EXTERN int pypy_main_function(int argc, char *argv[]) diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -96,6 +96,8 @@ continue if name == 'pypy_debug_file': # ok to export this one continue + if name == 'rpython_startup_code': # ok for this one too + continue if 'pypy' in name.lower() or 'rpy' in name.lower(): raise Exception("Unexpected exported name %r. " "What is likely missing is RPY_EXTERN before the " diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -203,9 +203,8 @@ try: points = secondary_entrypoints[key] except KeyError: - raise KeyError( - "Entrypoints not found. I only know the keys %r." % - (", ".join(secondary_entrypoints.keys()), )) + raise KeyError("Entrypoint %r not found (not in %r)" % + (key, secondary_entrypoints.keys())) self.secondary_entrypoints.extend(points) self.translator.driver_instrument_result = self.instrument_result From pypy.commits at gmail.com Mon Jan 4 13:55:43 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 10:55:43 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Write a line when we run the final executable too Message-ID: <568ac02f.8a75c20a.13c37.767f@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2527:91627f02d14f Date: 2016-01-04 18:59 +0000 http://bitbucket.org/cffi/cffi/changeset/91627f02d14f/ Log: Write a line when we run the final executable too diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -60,6 +60,7 @@ path = self.get_path() env = os.environ.copy() env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + print 'running %r in %r' % (name, path) popen = subprocess.Popen([name], cwd=path, env=env, stdout=subprocess.PIPE) result = popen.stdout.read() From pypy.commits at gmail.com Mon Jan 4 13:58:27 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 10:58:27 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: hg merge ec-keepalive Message-ID: <568ac0d3.520e1c0a.2bb4b.ffffc6cf@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81560:6b1fefba1c93 Date: 2016-01-04 19:57 +0100 http://bitbucket.org/pypy/pypy/changeset/6b1fefba1c93/ Log: hg merge ec-keepalive diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -1,4 +1,4 @@ -secondary_entrypoints = {} +secondary_entrypoints = {"main": []} import py from rpython.rtyper.lltypesystem import lltype, rffi @@ -109,17 +109,3 @@ "you. Another difference is that entrypoint_highlevel() " "returns the normal Python function, which can be safely " "called from more Python code.") - - -# the point of dance below is so the call to rpython_startup_code actually -# does call asm_stack_bottom. It's here because there is no other good place. -# This thing is imported by any target which has any API, so it'll get -# registered - -RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True, - random_effects_on_gcobjs=True) - - at entrypoint_highlevel('main', [], c_name='rpython_startup_code') -def rpython_startup_code(): - RPython_StartupCode() diff --git a/rpython/rlib/rshrinklist.py b/rpython/rlib/rshrinklist.py --- a/rpython/rlib/rshrinklist.py +++ b/rpython/rlib/rshrinklist.py @@ -6,6 +6,8 @@ The twist is that occasionally append() will throw away the items for which must_keep() returns False. (It does so without changing the order.) + + See also rpython.rlib.rweaklist. """ _mixin_ = True diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py --- a/rpython/rlib/rweaklist.py +++ b/rpython/rlib/rweaklist.py @@ -5,6 +5,13 @@ class RWeakListMixin(object): + """A mixin base class. A collection that weakly maps indexes to objects. + After an object goes away, its index is marked free and will be reused + by some following add_handle() call. So add_handle() might not append + the object at the end of the list, but can put it anywhere. + + See also rpython.rlib.rshrinklist. + """ _mixin_ = True def initialize(self): diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,24 @@ # include #endif +void rpython_startup_code(void) +{ +#ifdef RPY_WITH_GIL + RPyGilAcquire(); +#endif +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; +#endif + pypy_asm_stack_bottom(); + RPython_StartupCode(); +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter--; +#endif +#ifdef RPY_WITH_GIL + RPyGilRelease(); +#endif +} + RPY_EXTERN int pypy_main_function(int argc, char *argv[]) diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -33,6 +33,8 @@ /* Use the '__thread' specifier, so far only on Linux */ +#include + RPY_EXTERN __thread struct pypy_threadlocal_s pypy_threadlocal; #define OP_THREADLOCALREF_ADDR(r) \ @@ -68,8 +70,6 @@ # define _RPy_ThreadLocals_Set(x) pthread_setspecific(pypy_threadlocal_key, x) #endif -RPY_EXTERN pthread_key_t pypy_threadlocal_key; - #define OP_THREADLOCALREF_ADDR(r) \ do { \ @@ -91,6 +91,9 @@ /* ------------------------------------------------------------ */ +RPY_EXTERN pthread_key_t pypy_threadlocal_key; + + /* only for the fall-back path in the JIT */ #define OP_THREADLOCALREF_GET_NONCONST(RESTYPE, offset, r) \ do { \ diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -96,6 +96,8 @@ continue if name == 'pypy_debug_file': # ok to export this one continue + if name == 'rpython_startup_code': # ok for this one too + continue if 'pypy' in name.lower() or 'rpy' in name.lower(): raise Exception("Unexpected exported name %r. " "What is likely missing is RPY_EXTERN before the " diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -203,9 +203,8 @@ try: points = secondary_entrypoints[key] except KeyError: - raise KeyError( - "Entrypoints not found. I only know the keys %r." % - (", ".join(secondary_entrypoints.keys()), )) + raise KeyError("Entrypoint %r not found (not in %r)" % + (key, secondary_entrypoints.keys())) self.secondary_entrypoints.extend(points) self.translator.driver_instrument_result = self.instrument_result From pypy.commits at gmail.com Mon Jan 4 14:04:57 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 04 Jan 2016 11:04:57 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: add myself, departing Thurs evening to catch a Fri morning flight from Zurich Message-ID: <568ac259.17941c0a.1c63a.ffffe777@mx.google.com> Author: mattip Branch: extradoc Changeset: r5588:69a8d16f51f2 Date: 2016-01-04 21:04 +0200 http://bitbucket.org/pypy/extradoc/changeset/69a8d16f51f2/ Log: add myself, departing Thurs evening to catch a Fri morning flight from Zurich diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -13,6 +13,7 @@ (SEE NOTE BELOW) Remi Meier 21-27 Ermina Carl Friedrich Bolz 20-27 Ermina? +Matti Picus 20-25 Ermina ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room From pypy.commits at gmail.com Mon Jan 4 14:20:43 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 04 Jan 2016 11:20:43 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Add myself to the list of people coming to the Leysin sprint. Message-ID: <568ac60b.85e41c0a.310b3.02e8@mx.google.com> Author: Manuel Jacob Branch: extradoc Changeset: r5589:32950bfea0ef Date: 2016-01-04 20:19 +0100 http://bitbucket.org/pypy/extradoc/changeset/32950bfea0ef/ Log: Add myself to the list of people coming to the Leysin sprint. diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -14,6 +14,7 @@ Remi Meier 21-27 Ermina Carl Friedrich Bolz 20-27 Ermina? Matti Picus 20-25 Ermina +Manuel Jacob 20-28 Ermina ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room From pypy.commits at gmail.com Mon Jan 4 14:31:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 04 Jan 2016 11:31:31 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: I'm comming too. Message-ID: <568ac893.8a5a1c0a.fb76a.0724@mx.google.com> Author: Richard Plangger Branch: extradoc Changeset: r5590:051768591230 Date: 2016-01-04 20:31 +0100 http://bitbucket.org/pypy/extradoc/changeset/051768591230/ Log: I'm comming too. diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -15,6 +15,7 @@ Carl Friedrich Bolz 20-27 Ermina? Matti Picus 20-25 Ermina Manuel Jacob 20-28 Ermina +Richard Plangger 20-28 Ermina ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room From pypy.commits at gmail.com Mon Jan 4 15:39:40 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 04 Jan 2016 12:39:40 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: pair regalloc does not overwrite the variable binding anymore, but binds an the reigster to an additional parameter (e.g. the return value) Message-ID: <568ad88c.022f1c0a.d0118.1e12@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81561:3c7888505b50 Date: 2016-01-04 21:38 +0100 http://bitbucket.org/pypy/pypy/changeset/3c7888505b50/ Log: pair regalloc does not overwrite the variable binding anymore, but binds an the reigster to an additional parameter (e.g. the return value) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -535,11 +535,11 @@ return chr(ord(c) + ord(c1)) functions = [ - (func_int, lltype.Signed, types.sint, 655360, 655360), - (func_int, lltype.Signed, types.sint, 655360, -293999429), + #(func_int, lltype.Signed, types.sint, 655360, 655360), + #(func_int, lltype.Signed, types.sint, 655360, -293999429), (func_int, rffi.SHORT, types.sint16, 1213, 1213), - (func_int, rffi.SHORT, types.sint16, 1213, -12020), - (func_char, lltype.Char, types.uchar, 12, 12), + #(func_int, rffi.SHORT, types.sint16, 1213, -12020), + #(func_char, lltype.Char, types.uchar, 12, 12), ] cpu = self.cpu diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -51,12 +51,11 @@ a1 = op.getarg(1) if check_imm32(a0): a0, a1 = a1, a0 - lr,lq = self.rm.ensure_even_odd_pair(a0, bind_first=False) + lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=False) if check_imm32(a1): l1 = imm(a1.getint()) else: l1 = self.ensure_reg(a1) - self.force_result_in_reg(op, a0) self.free_op_vars() return [lr, lq, l1] @@ -66,11 +65,10 @@ a1 = op.getarg(1) if isinstance(a0, Const): poolloc = self.ensure_reg(a0) - lr,lq = self.rm.ensure_even_odd_pair(op, bind_first=modulus, must_exist=False) + lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=modulus, must_exist=False) self.assembler.mc.LG(lq, poolloc) else: - lr,lq = self.rm.ensure_even_odd_pair(a0, bind_first=modulus) - self.rm.force_result_in_reg(op, a0) + lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=modulus) l1 = self.ensure_reg(a1) self.free_op_vars() self.rm._check_invariants() diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -157,19 +157,19 @@ self.temp_boxes.append(box) return reg - def ensure_even_odd_pair(self, var, bind_first=True, + def ensure_even_odd_pair(self, var, bindvar, bind_first=True, must_exist=True, load_loc_odd=True, move_regs=True): self._check_type(var) prev_loc = self.loc(var, must_exist=must_exist) var2 = TempVar() - self.temp_boxes.append(var2) if prev_loc is self.frame_reg: return prev_loc if bind_first: - loc, loc2 = self.force_allocate_reg_pair(var, var2, self.temp_boxes) + loc, loc2 = self.force_allocate_reg_pair(bindvar, var2, self.temp_boxes) else: - loc, loc2 = self.force_allocate_reg_pair(var2, var, self.temp_boxes) + loc, loc2 = self.force_allocate_reg_pair(var2, bindvar, self.temp_boxes) + self.temp_boxes.append(var2) assert loc.is_even() and loc2.is_odd() if move_regs and prev_loc is not loc2: if load_loc_odd: @@ -179,12 +179,8 @@ return loc, loc2 def force_allocate_reg_pair(self, var, var2, forbidden_vars=[], selected_reg=None): - """ Forcibly allocate a register for the new variable v. - It must not be used so far. If we don't have a free register, - spill some other variable, according to algorithm described in - '_pick_variable_to_spill'. - - Will not spill a variable from 'forbidden_vars'. + """ Forcibly allocate a register for the new variable var. + var will have an even register (var2 will have an odd register). """ self._check_type(var) self._check_type(var2) @@ -207,6 +203,8 @@ candidates.append(odd) i -= 1 continue + assert var not in self.reg_bindings + assert var2 not in self.reg_bindings self.reg_bindings[var] = even self.reg_bindings[var2] = odd del self.free_regs[i] @@ -490,10 +488,14 @@ if not we_are_translated() and opnum == -127: self._consider_force_spill(op) else: + print("regalloc before", self.rm.free_regs, self.rm.reg_bindings) + print(op) arglocs = prepare_oplist[opnum](self, op) asm_operations[opnum](self.assembler, op, arglocs, self) self.free_op_vars() self.possibly_free_var(op) + print("regalloc after", self.rm.free_regs, self.rm.reg_bindings) + print"" self.rm._check_invariants() self.fprm._check_invariants() if self.assembler.mc.get_relative_pos() > self.limit_loop_break: @@ -908,11 +910,11 @@ def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc, length_loc = self.rm.ensure_even_odd_pair(op.getarg(0), + base_loc, length_loc = self.rm.ensure_even_odd_pair(op.getarg(0), op, bind_first=True, must_exist=False, load_loc_odd=False) tempvar = TempInt() self.rm.temp_boxes.append(tempvar) - pad_byte, _ = self.rm.ensure_even_odd_pair(tempvar, + pad_byte, _ = self.rm.ensure_even_odd_pair(tempvar, tempvar, bind_first=True, must_exist=False, move_regs=False) startindex_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) From pypy.commits at gmail.com Mon Jan 4 18:39:27 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 15:39:27 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Explicitly disable automatic_keepalive() in one case Message-ID: <568b02af.8205c20a.c5c42.03c5@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81562:19692a864891 Date: 2016-01-05 00:38 +0100 http://bitbucket.org/pypy/pypy/changeset/19692a864891/ Log: Explicitly disable automatic_keepalive() in one case diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -407,9 +407,12 @@ def automatic_keepalive(config): """Returns True if translated with a GC that keeps alive the set() value until the end of the thread. Returns False - if you need to keep it alive yourself. + if you need to keep it alive yourself (but in that case, you + should also reset it to None before the thread finishes). """ - return config.translation.gctransformer == "framework" + return (config.translation.gctransformer == "framework" and + # see translator/c/src/threadlocal.c for the following line + (not _win32 or config.translation.shared)) tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", @@ -418,7 +421,8 @@ loop_invariant=True) tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") tlfield_alt_errno = ThreadLocalField(rffi.INT, "alt_errno") -if sys.platform == "win32": +_win32 = (sys.platform == "win32") +if _win32: from rpython.rlib import rwin32 tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") tlfield_alt_lasterror = ThreadLocalField(rwin32.DWORD, "alt_lasterror") diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -77,7 +77,10 @@ There are some alternatives known, but they are horrible in other ways (e.g. using undocumented behavior). This seems to be the simplest, but feel free to fix if you need that. - */ + + For this reason we have the line 'not _win32 or config.translation.shared' + in rpython.rlib.rthread. +*/ BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD reason_for_call, LPVOID reserved) From pypy.commits at gmail.com Mon Jan 4 19:59:01 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 16:59:01 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Potential fix: need to use __sync_lock_release() here Message-ID: <568b1555.cf0b1c0a.bcad3.5bde@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81563:2f853825ff8f Date: 2016-01-05 01:21 +0100 http://bitbucket.org/pypy/pypy/changeset/2f853825ff8f/ Log: Potential fix: need to use __sync_lock_release() here diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h --- a/rpython/translator/c/src/thread.h +++ b/rpython/translator/c/src/thread.h @@ -48,7 +48,7 @@ } static inline void _RPyGilRelease(void) { assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); - rpy_fastgil = 0; + lock_release(&rpy_fastgil); } static inline long *_RPyFetchFastGil(void) { return &rpy_fastgil; diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -38,3 +38,4 @@ #else #define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) #endif +#define lock_release(ptr) (*((volatile long *)ptr) = 0) diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h --- a/rpython/translator/c/src/thread_pthread.h +++ b/rpython/translator/c/src/thread_pthread.h @@ -81,3 +81,4 @@ #define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) +#define lock_release(ptr) __sync_lock_release(ptr) From pypy.commits at gmail.com Mon Jan 4 19:59:03 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 16:59:03 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Maybe it's a better idea to use critical sections here Message-ID: <568b1557.c4b61c0a.25a57.5c07@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81564:5c17e7e2b811 Date: 2016-01-05 01:41 +0100 http://bitbucket.org/pypy/pypy/changeset/5c17e7e2b811/ Log: Maybe it's a better idea to use critical sections here diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -231,10 +231,19 @@ return (result != WAIT_TIMEOUT); } -#define mutex1_t mutex2_t -#define mutex1_init mutex2_init -#define mutex1_lock mutex2_lock -#define mutex1_unlock mutex2_unlock +typedef CRITICAL_SECTION mutex1_t; + +static inline void mutex1_init(mutex1_t *mutex) { + InitializeCriticalSection(mutex); +} + +static inline void mutex1_lock(mutex1_t *mutex) { + EnterCriticalSection(mutex); +} + +static inline void mutex1_unlock(mutex1_t *mutex) { + LeaveCriticalSection(mutex); +} //#define lock_test_and_set(ptr, value) see thread_nt.h #define atomic_increment(ptr) InterlockedIncrement(ptr) From pypy.commits at gmail.com Mon Jan 4 19:59:05 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 16:59:05 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: - fix the 'ready = 0', which should be after memset(), otherwise it Message-ID: <568b1559.84e31c0a.56c09.5e79@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81565:c9440e002f09 Date: 2016-01-05 01:58 +0100 http://bitbucket.org/pypy/pypy/changeset/c9440e002f09/ Log: - fix the 'ready = 0', which should be after memset(), otherwise it is overridden - fix a rare case of concurrent changes to the doubly-linked list by adding a lock diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -394,11 +394,13 @@ def _trace_tlref(gc, obj, callback, arg): p = llmemory.NULL + llop.threadlocalref_acquire(lltype.Void) while True: p = llop.threadlocalref_enum(llmemory.Address, p) if not p: break gc._trace_callback(callback, arg, p + offset) + llop.threadlocalref_release(lltype.Void) _lambda_trace_tlref = lambda: _trace_tlref TRACETLREF = lltype.GcStruct('TRACETLREF') _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -950,6 +950,10 @@ return self.op_raw_load(RESTYPE, _address_of_thread_local(), offset) op_threadlocalref_get.need_result_type = True + def op_threadlocalref_acquire(self, prev): + raise NotImplementedError + def op_threadlocalref_release(self, prev): + raise NotImplementedError def op_threadlocalref_enum(self, prev): raise NotImplementedError diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -547,6 +547,8 @@ 'threadlocalref_addr': LLOp(), # get (or make) addr of tl 'threadlocalref_get': LLOp(sideeffects=False), # read field (no check) + 'threadlocalref_acquire': LLOp(), # lock for enum + 'threadlocalref_release': LLOp(), # lock for enum 'threadlocalref_enum': LLOp(sideeffects=False), # enum all threadlocalrefs # __________ debugging __________ diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -6,6 +6,20 @@ #include "src/threadlocal.h" +/* this is a spin-lock that must be acquired around each doubly-linked-list + manipulation (because such manipulations can occur without the GIL) */ +static long pypy_threadlocal_lock = 0; + +void _RPython_ThreadLocals_Acquire(void) { + while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { + /* busy loop */ + } +} +void _RPython_ThreadLocals_Release(void) { + lock_release(&pypy_threadlocal_lock); +} + + pthread_key_t pypy_threadlocal_key #ifdef _WIN32 = TLS_OUT_OF_INDEXES @@ -48,23 +62,29 @@ where it is not the case are rather old nowadays. */ # endif #endif + _RPython_ThreadLocals_Acquire(); oldnext = linkedlist_head.next; tls->prev = &linkedlist_head; tls->next = oldnext; linkedlist_head.next = tls; oldnext->prev = tls; tls->ready = 42; + _RPython_ThreadLocals_Release(); } static void threadloc_unlink(void *p) { + /* warning: this can be called at completely random times without + the GIL. */ struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + _RPython_ThreadLocals_Acquire(); if (tls->ready == 42) { - tls->ready = 0; tls->next->prev = tls->prev; tls->prev->next = tls->next; memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + tls->ready = 0; } + _RPython_ThreadLocals_Release(); #ifndef USE___THREAD free(p); #endif @@ -110,6 +130,7 @@ a non-null thread-local value). This is needed even in the case where we use '__thread' below, for the destructor. */ + assert(pypy_threadlocal_lock == 0); #ifdef _WIN32 pypy_threadlocal_key = TlsAlloc(); if (pypy_threadlocal_key == TLS_OUT_OF_INDEXES) @@ -122,6 +143,12 @@ abort(); } _RPython_ThreadLocals_Build(); + +#ifndef _WIN32 + pthread_atfork(_RPython_ThreadLocals_Acquire, + _RPython_ThreadLocals_Release, + _RPython_ThreadLocals_Release); +#endif } @@ -136,7 +163,7 @@ char *_RPython_ThreadLocals_Build(void) { - RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); + RPyAssert(pypy_threadlocal.ready == 0, "unclean thread-local"); _RPy_ThreadLocals_Init(&pypy_threadlocal); /* we also set up &pypy_threadlocal as a POSIX thread-local variable, diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -19,11 +19,17 @@ current thread, and if not, calls the following helper. */ RPY_EXTERN char *_RPython_ThreadLocals_Build(void); +RPY_EXTERN void _RPython_ThreadLocals_Acquire(void); +RPY_EXTERN void _RPython_ThreadLocals_Release(void); + +/* Must acquire/release the thread-local lock around a series of calls + to the following function */ RPY_EXTERN struct pypy_threadlocal_s * _RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev); -#define OP_THREADLOCALREF_ENUM(p, r) \ - r = _RPython_ThreadLocals_Enum(p) +#define OP_THREADLOCALREF_ACQUIRE(r) _RPython_ThreadLocals_Acquire() +#define OP_THREADLOCALREF_RELEASE(r) _RPython_ThreadLocals_Release() +#define OP_THREADLOCALREF_ENUM(p, r) r = _RPython_ThreadLocals_Enum(p) /* ------------------------------------------------------------ */ From pypy.commits at gmail.com Mon Jan 4 20:04:38 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 04 Jan 2016 17:04:38 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: Missing includes Message-ID: <568b16a6.d69c1c0a.f8c20.5ee2@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81566:4b5a99284649 Date: 2016-01-05 01:08 +0000 http://bitbucket.org/pypy/pypy/changeset/4b5a99284649/ Log: Missing includes diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -3,7 +3,9 @@ #include #include #include +#include #include "src/threadlocal.h" +#include "src/thread.h" /* this is a spin-lock that must be acquired around each doubly-linked-list From pypy.commits at gmail.com Tue Jan 5 02:59:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 04 Jan 2016 23:59:56 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: wow, how could frame regalloc even work? passing arguments in the right registers now Message-ID: <568b77fc.41dd1c0a.9e531.ffffb7da@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81567:b5700c1e1b20 Date: 2016-01-05 08:59 +0100 http://bitbucket.org/pypy/pypy/changeset/b5700c1e1b20/ Log: wow, how could frame regalloc even work? passing arguments in the right registers now diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -305,7 +305,7 @@ # * no managed register must be modified ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) + mc.STG(r.SCRATCH2, l.addr(ofs2, r.SPP)) self._push_core_regs_to_jitframe(mc, r.MANAGED_REGS) self._push_fp_regs_to_jitframe(mc) @@ -317,7 +317,7 @@ # no need to move second argument (frame_depth), # it is already in register r3! - mc.LGR(r.r3, r.SCRATCH2) + mc.LGR(r.r3, r.SCRATCH) RCS2 = r.r10 RCS3 = r.r12 @@ -508,6 +508,16 @@ self.frame_depth_to_patch.append((patch_pos, mc.currpos())) + def patch_stack_checks(self, frame_depth): + if frame_depth > 0x7fff: + raise JitFrameTooDeep # XXX + for traps_pos, jmp_target in self.frame_depth_to_patch: + pmc = OverwritingBuilder(self.mc, traps_pos, 3) + # three traps, so exactly three instructions to patch here + pmc.CGFI(r.SCRATCH2, l.imm(frame_depth)) + pmc.BRC(c.EQ, l.imm(jmp_target - (traps_pos + 6))) + pmc.LGHI(r.SCRATCH, l.imm(frame_depth)) + pmc.overwrite() @rgc.no_release_gil def assemble_loop(self, jd_id, unique_id, logger, loopname, inputargs, @@ -842,17 +852,6 @@ tok.pos_recovery_stub = self.generate_quick_failure(tok) self.pending_guard_tokens_recovered = len(self.pending_guard_tokens) - def patch_stack_checks(self, frame_depth): - if frame_depth > 0x7fff: - raise JitFrameTooDeep # XXX - for traps_pos, jmp_target in self.frame_depth_to_patch: - pmc = OverwritingBuilder(self.mc, traps_pos, 3) - # three traps, so exactly three instructions to patch here - pmc.CGFI(r.r2, l.imm(frame_depth)) - pmc.BRC(c.EQ, l.imm(jmp_target - (traps_pos + 6))) - pmc.LGHI(r.r3, l.imm(frame_depth)) - pmc.overwrite() - def materialize_loop(self, looptoken): self.datablockwrapper.done() self.datablockwrapper = None From pypy.commits at gmail.com Tue Jan 5 03:35:46 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 00:35:46 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: After fork(), at least in the __thread case, we need to be careful and Message-ID: <568b8062.913bc20a.d29ab.7070@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81568:18bd10183487 Date: 2016-01-05 08:39 +0000 http://bitbucket.org/pypy/pypy/changeset/18bd10183487/ Log: After fork(), at least in the __thread case, we need to be careful and reinitialize the doubly-linked list. Otherwise, it points to old __thread structures, which are silently gone from memory. diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -12,12 +12,16 @@ manipulation (because such manipulations can occur without the GIL) */ static long pypy_threadlocal_lock = 0; +static int check_valid(void); + void _RPython_ThreadLocals_Acquire(void) { while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { /* busy loop */ } + assert(check_valid()); } void _RPython_ThreadLocals_Release(void) { + assert(check_valid()); lock_release(&pypy_threadlocal_lock); } @@ -34,6 +38,43 @@ &linkedlist_head, /* prev */ &linkedlist_head }; /* next */ +static int check_valid(void) +{ + struct pypy_threadlocal_s *prev, *cur; + prev = &linkedlist_head; + while (1) { + cur = prev->next; + assert(cur->prev == prev); + if (cur == &linkedlist_head) + break; + assert(cur->ready == 42); + assert(cur->next != cur); + prev = cur; + } + assert(cur->ready == -1); + return 1; +} + +static void cleanup_after_fork(void) +{ + /* assume that at most one pypy_threadlocal_s survived, the current one */ + struct pypy_threadlocal_s *cur; +#ifdef USE___THREAD + cur = &pypy_threadlocal; +#else + cur = (struct pypy_threadlocal_s *)_RPy_ThreadLocals_Get(); +#endif + if (cur && cur->ready == 42) { + cur->next = cur->prev = &linkedlist_head; + linkedlist_head.next = linkedlist_head.prev = cur; + } + else { + linkedlist_head.next = linkedlist_head.prev = &linkedlist_head; + } + _RPython_ThreadLocals_Release(); +} + + struct pypy_threadlocal_s * _RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev) { @@ -149,7 +190,7 @@ #ifndef _WIN32 pthread_atfork(_RPython_ThreadLocals_Acquire, _RPython_ThreadLocals_Release, - _RPython_ThreadLocals_Release); + cleanup_after_fork); #endif } From pypy.commits at gmail.com Tue Jan 5 04:03:58 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 01:03:58 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed debug statements, switched arguments while calling frame realloc and added a test for the pool Message-ID: <568b86fe.42661c0a.c9342.ffffcead@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81569:62baaa8ea669 Date: 2016-01-05 09:55 +0100 http://bitbucket.org/pypy/pypy/changeset/62baaa8ea669/ Log: removed debug statements, switched arguments while calling frame realloc and added a test for the pool diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -305,7 +305,7 @@ # * no managed register must be modified ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.STG(r.SCRATCH2, l.addr(ofs2, r.SPP)) + mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) self._push_core_regs_to_jitframe(mc, r.MANAGED_REGS) self._push_fp_regs_to_jitframe(mc) @@ -317,7 +317,7 @@ # no need to move second argument (frame_depth), # it is already in register r3! - mc.LGR(r.r3, r.SCRATCH) + mc.LGR(r.r3, r.SCRATCH2) RCS2 = r.r10 RCS3 = r.r12 @@ -463,7 +463,6 @@ def new_stack_loc(self, i, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() loc = l.StackLocation(i, l.get_fp_offset(base_ofs, i), tp) - print("new stack location", loc) return loc def _call_header_with_stack_check(self): diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -488,14 +488,10 @@ if not we_are_translated() and opnum == -127: self._consider_force_spill(op) else: - print("regalloc before", self.rm.free_regs, self.rm.reg_bindings) - print(op) arglocs = prepare_oplist[opnum](self, op) asm_operations[opnum](self.assembler, op, arglocs, self) self.free_op_vars() self.possibly_free_var(op) - print("regalloc after", self.rm.free_regs, self.rm.reg_bindings) - print"" self.rm._check_invariants() self.fprm._check_invariants() if self.assembler.mc.get_relative_pos() > self.limit_loop_break: diff --git a/rpython/jit/backend/zarch/test/test_int.py b/rpython/jit/backend/zarch/test/test_int.py --- a/rpython/jit/backend/zarch/test/test_int.py +++ b/rpython/jit/backend/zarch/test/test_int.py @@ -2,11 +2,8 @@ from rpython.jit.backend.zarch.runner import CPU_S390_64 from rpython.jit.tool.oparser import parse from rpython.jit.metainterp.history import (AbstractFailDescr, - AbstractDescr, - BasicFailDescr, BasicFinalDescr, - JitCellToken, TargetToken, - ConstInt, ConstPtr, - Const, ConstFloat) + AbstractDescr, BasicFailDescr, BasicFinalDescr, JitCellToken, + TargetToken, ConstInt, ConstPtr, Const, ConstFloat) from rpython.jit.metainterp.resoperation import InputArgInt, InputArgFloat from rpython.rtyper.lltypesystem import lltype from rpython.jit.metainterp.resoperation import ResOperation, rop diff --git a/rpython/jit/backend/zarch/test/test_pool.py b/rpython/jit/backend/zarch/test/test_pool.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_pool.py @@ -0,0 +1,32 @@ +from rpython.jit.backend.zarch.pool import LiteralPool +from rpython.jit.metainterp.history import (AbstractFailDescr, + AbstractDescr, BasicFailDescr, BasicFinalDescr, JitCellToken, + TargetToken, ConstInt, ConstPtr, Const, ConstFloat) +from rpython.jit.metainterp.resoperation import ResOperation, rop +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +import py + +class TestPoolZARCH(object): + def setup_class(self): + self.calldescr = None + + def setup_method(self, name): + self.pool = LiteralPool() + self.asm = None + + def ensure_can_hold(self, opnum, args, descr=None): + op = ResOperation(opnum, args, descr=descr) + self.pool.ensure_can_hold_constants(self.asm, op) + + def const_in_pool(self, c): + try: + self.pool.get_offset(c) + except KeyError: + return False + return True + + def test_constant_in_call_malloc(self): + c = ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef)) + self.ensure_can_hold(rop.CALL_MALLOC_GC, [c], descr=self.calldescr) + assert self.const_in_pool(c) + assert self.const_in_pool(ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef))) From pypy.commits at gmail.com Tue Jan 5 04:07:57 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 01:07:57 -0800 (PST) Subject: [pypy-commit] pypy default: Typo (thanks Vincent) Message-ID: <568b87ed.87591c0a.c077f.ffffd1d1@mx.google.com> Author: Armin Rigo Branch: Changeset: r81570:06434f97e9c6 Date: 2016-01-05 10:06 +0100 http://bitbucket.org/pypy/pypy/changeset/06434f97e9c6/ Log: Typo (thanks Vincent) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -299,7 +299,7 @@ return build_stat_result(space, st) def lstat(space, w_path): - "Like stat(path), but do no follow symbolic links." + "Like stat(path), but do not follow symbolic links." try: st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: From pypy.commits at gmail.com Tue Jan 5 04:36:02 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 01:36:02 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added more failing tests for the pool, filling the literal pool after the trace list has been rewritten (did not switch the order for bridges, fixes more tests) Message-ID: <568b8e82.c1bb1c0a.edbfd.ffffd793@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81571:38d3cd409efc Date: 2016-01-05 10:35 +0100 http://bitbucket.org/pypy/pypy/changeset/38d3cd409efc/ Log: added more failing tests for the pool, filling the literal pool after the trace list has been rewritten (did not switch the order for bridges, fixes more tests) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -608,13 +608,13 @@ arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = Regalloc(assembler=self) - self.pool.pre_assemble(self, operations, bridge=True) - startpos = self.mc.get_relative_pos() - self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) operations = regalloc.prepare_bridge(inputargs, arglocs, operations, self.current_clt.allgcrefs, self.current_clt.frame_info) + self.pool.pre_assemble(self, operations, bridge=True) + startpos = self.mc.get_relative_pos() + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) self._check_frame_depth(self.mc, regalloc.get_gcmap()) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() diff --git a/rpython/jit/backend/zarch/test/test_pool.py b/rpython/jit/backend/zarch/test/test_pool.py --- a/rpython/jit/backend/zarch/test/test_pool.py +++ b/rpython/jit/backend/zarch/test/test_pool.py @@ -2,8 +2,10 @@ from rpython.jit.metainterp.history import (AbstractFailDescr, AbstractDescr, BasicFailDescr, BasicFinalDescr, JitCellToken, TargetToken, ConstInt, ConstPtr, Const, ConstFloat) -from rpython.jit.metainterp.resoperation import ResOperation, rop +from rpython.jit.metainterp.resoperation import (ResOperation, rop, + InputArgInt) from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.jit.backend.zarch.helper.regalloc import check_imm32 import py class TestPoolZARCH(object): @@ -30,3 +32,18 @@ self.ensure_can_hold(rop.CALL_MALLOC_GC, [c], descr=self.calldescr) assert self.const_in_pool(c) assert self.const_in_pool(ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef))) + + @py.test.mark.parametrize('opnum', + [rop.INT_ADD, rop.INT_SUB, rop.INT_MUL]) + def test_constants_arith(self, opnum): + for c1 in [ConstInt(1), ConstInt(2**44), InputArgInt(1)]: + for c2 in [InputArgInt(1), ConstInt(1), ConstInt(2**55)]: + self.ensure_can_hold(opnum, [c1,c2]) + if c1.is_constant() and check_imm32(c1): + assert self.const_in_pool(c1) + else: + assert not self.const_in_pool(c1) + if c2.is_constant() and check_imm32(c2): + assert self.const_in_pool(c2) + else: + assert not self.const_in_pool(c2) From pypy.commits at gmail.com Tue Jan 5 05:59:10 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 02:59:10 -0800 (PST) Subject: [pypy-commit] pypy default: CPython has a special case for ``long("string", power-of-two-base)`` to Message-ID: <568ba1fe.41dd1c0a.9e531.fffff738@mx.google.com> Author: Armin Rigo Branch: Changeset: r81572:ef530201647c Date: 2016-01-05 11:58 +0100 http://bitbucket.org/pypy/pypy/changeset/ef530201647c/ Log: CPython has a special case for ``long("string", power-of-two-base)`` to avoid quadratic time. It is used by pickling, notably. diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -358,3 +358,10 @@ assert 3L.__coerce__(4L) == (3L, 4L) assert 3L.__coerce__(4) == (3, 4) assert 3L.__coerce__(object()) == NotImplemented + + def test_linear_long_base_16(self): + # never finishes if long(_, 16) is not linear-time + size = 100000 + n = "5" + "0" * size + expected = 5 << (size * 4) + assert long(n, 16) == expected diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2794,8 +2794,10 @@ def parse_digit_string(parser): # helper for fromstr + base = parser.base + if (base & (base - 1)) == 0: + return parse_string_from_binary_base(parser) a = rbigint() - base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 while True: @@ -2811,3 +2813,50 @@ tens *= base a.sign *= parser.sign return a + +def parse_string_from_binary_base(parser): + # The point to this routine is that it takes time linear in the number of + # string characters. + base = parser.base + if base == 2: bits_per_char = 1 + elif base == 4: bits_per_char = 2 + elif base == 8: bits_per_char = 3 + elif base == 16: bits_per_char = 4 + elif base == 32: bits_per_char = 5 + else: + raise AssertionError + + # n <- total number of bits needed, while moving 'parser' to the end + n = 0 + while parser.next_digit() >= 0: + n += 1 + + # b <- number of Python digits needed, = ceiling(n/SHIFT). */ + try: + b = ovfcheck(n * bits_per_char) + b = ovfcheck(b + (SHIFT - 1)) + except OverflowError: + raise ParseStringError("long string too large to convert") + b = (b // SHIFT) or 1 + z = rbigint([NULLDIGIT] * b, sign=parser.sign) + + # Read string from right, and fill in long from left; i.e., + # from least to most significant in both. + accum = _widen_digit(0) + bits_in_accum = 0 + pdigit = 0 + for _ in range(n): + k = parser.prev_digit() + accum |= _widen_digit(k) << bits_in_accum + bits_in_accum += bits_per_char + if bits_in_accum >= SHIFT: + z.setdigit(pdigit, accum) + pdigit += 1 + assert pdigit <= b + accum >>= SHIFT + bits_in_accum -= SHIFT + + if bits_in_accum: + z.setdigit(pdigit, accum) + z._normalize() + return z diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -485,6 +485,24 @@ else: return -1 + def prev_digit(self): + # After exhausting all n digits in next_digit(), you can walk them + # again in reverse order by calling prev_digit() exactly n times + i = self.i - 1 + assert i >= 0 + self.i = i + c = self.s[i] + digit = ord(c) + if '0' <= c <= '9': + digit -= ord('0') + elif 'A' <= c <= 'Z': + digit = (digit - ord('A')) + 10 + elif 'a' <= c <= 'z': + digit = (digit - ord('a')) + 10 + else: + raise AssertionError + return digit + # -------------- public API --------------------------------- INIT_SIZE = 100 # XXX tweak diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -825,7 +825,19 @@ def __init__(self, base, sign, digits): self.base = base self.sign = sign - self.next_digit = iter(digits + [-1]).next + self.i = 0 + self._digits = digits + def next_digit(self): + i = self.i + if i == len(self._digits): + return -1 + self.i = i + 1 + return self._digits[i] + def prev_digit(self): + i = self.i - 1 + assert i >= 0 + self.i = i + return self._digits[i] x = parse_digit_string(Parser(10, 1, [6])) assert x.eq(rbigint.fromint(6)) x = parse_digit_string(Parser(10, 1, [6, 2, 3])) @@ -847,6 +859,16 @@ x = parse_digit_string(Parser(7, -1, [0, 0, 0])) assert x.tobool() is False + for base in [2, 4, 8, 16, 32]: + for inp in [[0], [1], [1, 0], [0, 1], [1, 0, 1], [1, 0, 0, 1], + [1, 0, 0, base-1, 0, 1], [base-1, 1, 0, 0, 0, 1, 0], + [base-1]]: + inp = inp * 97 + x = parse_digit_string(Parser(base, -1, inp)) + num = sum(inp[i] * (base ** (len(inp)-1-i)) + for i in range(len(inp))) + assert x.eq(rbigint.fromlong(-num)) + BASE = 2 ** SHIFT From pypy.commits at gmail.com Tue Jan 5 06:03:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 03:03:01 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added test_float from backend/test (passing) Message-ID: <568ba2e5.4f911c0a.323ea.fffff83c@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81573:f9318251e43f Date: 2016-01-05 12:02 +0100 http://bitbucket.org/pypy/pypy/changeset/f9318251e43f/ Log: added test_float from backend/test (passing) fixed an issue in cond_call, did not correctly pop register r2 from the jit frame if it was saved earlier diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -381,14 +381,14 @@ # signature of these cond_call_slowpath functions: # * on entry, r12 contains the function to call # * r3, r4, r5, r6 contain arguments for the call - # * r2 is the gcmap + # * r0 is the gcmap # * the old value of these regs must already be stored in the jitframe # * on exit, all registers are restored from the jitframe mc = InstrBuilder() self.mc = mc ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.STG(r.r2, l.addr(ofs2,r.SPP)) + mc.STG(r.SCRATCH2, l.addr(ofs2,r.SPP)) # copy registers to the frame, with the exception of r3 to r6 and r12, # because these have already been saved by the caller. Note that @@ -399,10 +399,10 @@ else: saved_regs = ZARCHRegisterManager.all_regs regs = [reg for reg in saved_regs - if reg is not r.r3 and + if reg is not r.r2 and + reg is not r.r3 and reg is not r.r4 and reg is not r.r5 and - reg is not r.r6 and reg is not r.r12] self._push_core_regs_to_jitframe(mc, regs + [r.r14]) if supports_floats: diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -294,13 +294,13 @@ self.mc.trap() # patched later to a relative branch self.mc.write('\x00' * 4) - # save away r3, r4, r5, r6, r12 into the jitframe + # save away r2, r3, r4, r5, r12 into the jitframe should_be_saved = [ reg for reg in self._regalloc.rm.reg_bindings.itervalues() if reg in self._COND_CALL_SAVE_REGS] self._push_core_regs_to_jitframe(self.mc, should_be_saved) - self.load_gcmap(self.mc, r.r2, regalloc.get_gcmap()) + self.load_gcmap(self.mc, r.SCRATCH2, regalloc.get_gcmap()) # # load the 0-to-4 arguments into these registers, with the address of # the function to call into r12 @@ -325,7 +325,6 @@ # to the cond_call_slowpath helper. We never have any result value. relative_target = self.mc.currpos() - jmp_adr pmc = OverwritingBuilder(self.mc, jmp_adr, 1) - #BI, BO = c.encoding[fcond] pmc.BRCL(fcond, l.imm(relative_target)) pmc.overwrite() # might be overridden again to skip over the following diff --git a/rpython/jit/backend/zarch/test/test_float.py b/rpython/jit/backend/zarch/test/test_float.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_float.py @@ -0,0 +1,12 @@ +import py +from rpython.jit.backend.zarch.test.support import JitZARCHMixin +from rpython.jit.metainterp.test.test_float import FloatTests +from rpython.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() +class TestFloat(JitZARCHMixin, FloatTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_float.py + if not CPU.supports_singlefloats: + def test_singlefloat(self): + py.test.skip('requires singlefloats') diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -2117,8 +2117,8 @@ return l[-2] # not the blackholed version res = self.meta_interp(f, [5, 8]) assert 14 < res < 42 - res = self.meta_interp(f, [5, 2]) - assert 4 < res < 14 + #res = self.meta_interp(f, [5, 2]) + #assert 4 < res < 14 def test_compute_identity_hash(self): from rpython.rlib.objectmodel import compute_identity_hash From pypy.commits at gmail.com Tue Jan 5 06:16:18 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 03:16:18 -0800 (PST) Subject: [pypy-commit] pypy default: slightly better test Message-ID: <568ba602.cb941c0a.fd47.1a86@mx.google.com> Author: Armin Rigo Branch: Changeset: r81574:bf5ba4797872 Date: 2016-01-05 12:09 +0100 http://bitbucket.org/pypy/pypy/changeset/bf5ba4797872/ Log: slightly better test diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -362,6 +362,6 @@ def test_linear_long_base_16(self): # never finishes if long(_, 16) is not linear-time size = 100000 - n = "5" + "0" * size - expected = 5 << (size * 4) + n = "a" * size + expected = (2 << (size * 4)) // 3 assert long(n, 16) == expected From pypy.commits at gmail.com Tue Jan 5 06:16:20 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 03:16:20 -0800 (PST) Subject: [pypy-commit] pypy default: fix Message-ID: <568ba604.a8abc20a.d640c.7d51@mx.google.com> Author: Armin Rigo Branch: Changeset: r81575:8752634a16ae Date: 2016-01-05 11:15 +0000 http://bitbucket.org/pypy/pypy/changeset/8752634a16ae/ Log: fix diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2817,6 +2817,8 @@ def parse_string_from_binary_base(parser): # The point to this routine is that it takes time linear in the number of # string characters. + from rpython.rlib.rstring import ParseStringError + base = parser.base if base == 2: bits_per_char = 1 elif base == 4: bits_per_char = 2 From pypy.commits at gmail.com Tue Jan 5 07:16:54 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 04:16:54 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: prevent the base loc register to be in pool Message-ID: <568bb436.42661c0a.c9342.14f1@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81576:c137a0a35416 Date: 2016-01-05 13:15 +0100 http://bitbucket.org/pypy/pypy/changeset/c137a0a35416/ Log: prevent the base loc register to be in pool diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -807,6 +807,8 @@ def _emit_gc_load(self, op, arglocs, regalloc): result_loc, base_loc, ofs_loc, size_loc, sign_loc = arglocs + assert not result_loc.is_in_pool() + assert not base_loc.is_in_pool() assert not ofs_loc.is_in_pool() if ofs_loc.is_imm(): assert self._mem_offset_supported(ofs_loc.value) @@ -821,6 +823,9 @@ def _emit_gc_load_indexed(self, op, arglocs, regalloc): result_loc, base_loc, index_loc, offset_loc, size_loc, sign_loc =arglocs + assert not result_loc.is_in_pool() + assert not base_loc.is_in_pool() + assert not index_loc.is_in_pool() assert not offset_loc.is_in_pool() if offset_loc.is_imm() and self._mem_offset_supported(offset_loc.value): addr_loc = l.addr(offset_loc.value, base_loc, index_loc) @@ -836,6 +841,7 @@ def emit_gc_store(self, op, arglocs, regalloc): (base_loc, index_loc, value_loc, size_loc) = arglocs + assert not base_loc.is_in_pool() assert not index_loc.is_in_pool() if index_loc.is_imm() and self._mem_offset_supported(index_loc.value): addr_loc = l.addr(index_loc.value, base_loc) @@ -849,6 +855,8 @@ def emit_gc_store_indexed(self, op, arglocs, regalloc): (base_loc, index_loc, value_loc, offset_loc, size_loc) = arglocs + assert not base_loc.is_in_pool() + assert not index_loc.is_in_pool() addr_loc = self._load_address(base_loc, index_loc, offset_loc, r.SCRATCH) if value_loc.is_in_pool(): self.mc.LG(r.SCRATCH2, value_loc) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -802,7 +802,7 @@ def prepare_gc_store_indexed(self, op): args = op.getarglist() - base_loc = self.ensure_reg(op.getarg(0)) + base_loc = self.ensure_reg(op.getarg(0), force_in_reg=True) index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) value_loc = self.ensure_reg(op.getarg(2)) scale_box = op.getarg(3) From pypy.commits at gmail.com Tue Jan 5 07:40:37 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 05 Jan 2016 04:40:37 -0800 (PST) Subject: [pypy-commit] pypy exctrans: simplify code: always patch the graph in-place in funcgen Message-ID: <568bb9c5.0357c20a.f7afb.ffffb9ac@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81577:6b9bb2fbc629 Date: 2016-01-05 13:39 +0100 http://bitbucket.org/pypy/pypy/changeset/6b9bb2fbc629/ Log: simplify code: always patch the graph in-place in funcgen diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -92,17 +92,14 @@ def name(self, cname): #virtual return cname - def patch_graph(self, copy_graph): + def patch_graph(self): graph = self.graph if self.db.gctransformer and self.db.gctransformer.inline: - if copy_graph: - graph = copygraph(graph, shallow=True) self.db.gctransformer.inline_helpers(graph) return graph def implementation_begin(self): - self.oldgraph = self.graph - self.graph = self.patch_graph(copy_graph=True) + self.patch_graph() SSI_to_SSA(self.graph) self.collect_var_and_types() self.blocknum = {} @@ -128,8 +125,6 @@ self.vars = None self.blocknum = None self.innerloops = None - self.graph = self.oldgraph - del self.oldgraph def argnames(self): return [LOCALVAR % v.name for v in self.graph.getargs()] diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -205,7 +205,7 @@ for node in db.containerlist: if hasattr(node, 'funcgens'): for funcgen in node.funcgens: - funcgen.patch_graph(copy_graph=False) + funcgen.patch_graph() return db def generate_source(self, db=None, defines={}, exe_name=None): From pypy.commits at gmail.com Tue Jan 5 07:51:28 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 04:51:28 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: int_lshift is a logical one, but up to now emitted an arithmetic shift, this makes the test_basic of the s390x fully passing! Message-ID: <568bbc50.6adec20a.ad5ea.ffffbda0@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81578:fec1779a7628 Date: 2016-01-05 13:50 +0100 http://bitbucket.org/pypy/pypy/changeset/fec1779a7628/ Log: int_lshift is a logical one, but up to now emitted an arithmetic shift, this makes the test_basic of the s390x fully passing! diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -152,7 +152,7 @@ emit_int_xor = gen_emit_rr_or_rpool("XGR", "XG") emit_int_rshift = gen_emit_shift("SRAG") - emit_int_lshift = gen_emit_shift("SLAG") + emit_int_lshift = gen_emit_shift("SLLG") emit_uint_rshift = gen_emit_shift("SRLG") emit_int_le = gen_emit_cmp_op(c.LE) From pypy.commits at gmail.com Tue Jan 5 08:46:30 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 05:46:30 -0800 (PST) Subject: [pypy-commit] pypy default: Add __pypy__.decode_long(), an app-level interface to Message-ID: <568bc936.a453c20a.c8259.1b6a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81579:af91853285d1 Date: 2016-01-05 13:52 +0100 http://bitbucket.org/pypy/pypy/changeset/af91853285d1/ Log: Add __pypy__.decode_long(), an app-level interface to rbigint.frombytes(). Use it in the pickle.py module. diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -89,6 +89,7 @@ 'set_code_callback' : 'interp_magic.set_code_callback', 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', + 'decode_long' : 'interp_magic.decode_long', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pycode import CodeHookCache from pypy.interpreter.pyframe import PyFrame @@ -158,4 +158,13 @@ if space.is_none(w_callable): cache._code_hook = None else: - cache._code_hook = w_callable \ No newline at end of file + cache._code_hook = w_callable + + at unwrap_spec(string=str, byteorder=str, signed=int) +def decode_long(space, string, byteorder='little', signed=1): + from rpython.rlib.rbigint import rbigint, InvalidEndiannessError + try: + result = rbigint.frombytes(string, byteorder, bool(signed)) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, "invalid byteorder argument") + return space.newlong_from_rbigint(result) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -30,4 +30,20 @@ """ in d finally: __pypy__.set_code_callback(None) - assert d['f'].__code__ in l \ No newline at end of file + assert d['f'].__code__ in l + + def test_decode_long(self): + from __pypy__ import decode_long + assert decode_long('') == 0 + assert decode_long('\xff\x00') == 255 + assert decode_long('\xff\x7f') == 32767 + assert decode_long('\x00\xff') == -256 + assert decode_long('\x00\x80') == -32768 + assert decode_long('\x80') == -128 + assert decode_long('\x7f') == 127 + assert decode_long('\x55' * 97) == (1 << (97 * 8)) // 3 + assert decode_long('\x00\x80', 'big') == 128 + assert decode_long('\xff\x7f', 'little', False) == 32767 + assert decode_long('\x00\x80', 'little', False) == 32768 + assert decode_long('\x00\x80', 'little', True) == -32768 + raises(ValueError, decode_long, '', 'foo') From pypy.commits at gmail.com Tue Jan 5 08:46:32 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 05:46:32 -0800 (PST) Subject: [pypy-commit] pypy default: Fix the complexity in cPickle.py too (there's a mostly-duplicate?) Message-ID: <568bc938.913bc20a.d29ab.ffffe2f0@mx.google.com> Author: Armin Rigo Branch: Changeset: r81580:256ca06a4fb1 Date: 2016-01-05 13:50 +0000 http://bitbucket.org/pypy/pypy/changeset/256ca06a4fb1/ Log: Fix the complexity in cPickle.py too (there's a mostly-duplicate?) diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -559,6 +559,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +593,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() From pypy.commits at gmail.com Tue Jan 5 09:01:31 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 06:01:31 -0800 (PST) Subject: [pypy-commit] pypy ec-keepalive: ready for merge Message-ID: <568bccbb.84ab1c0a.9f5d.3c69@mx.google.com> Author: Armin Rigo Branch: ec-keepalive Changeset: r81581:cc682a90f3e2 Date: 2016-01-05 14:56 +0100 http://bitbucket.org/pypy/pypy/changeset/cc682a90f3e2/ Log: ready for merge From pypy.commits at gmail.com Tue Jan 5 09:01:37 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 06:01:37 -0800 (PST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <568bccc1.878e1c0a.dc3d7.3c50@mx.google.com> Author: Armin Rigo Branch: Changeset: r81584:d0004489a9cc Date: 2016-01-05 15:00 +0100 http://bitbucket.org/pypy/pypy/changeset/d0004489a9cc/ Log: merge heads diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -559,6 +559,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +593,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() From pypy.commits at gmail.com Tue Jan 5 09:01:33 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 06:01:33 -0800 (PST) Subject: [pypy-commit] pypy default: hg merge ec-keepalive Message-ID: <568bccbd.465fc20a.c35be.493f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81582:12f30e098f24 Date: 2016-01-05 14:59 +0100 http://bitbucket.org/pypy/pypy/changeset/12f30e098f24/ Log: hg merge ec-keepalive - fix rthread so that, at least with framework GCs, the objects stored as threadlocals don't force a minor collection and are kept alive automatically until the thread finishes (or a different object replaces it, of course). - use that to optimize massively the case where, in a new C-created thread, we keep invoking short-running Python callbacks. diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -27,7 +27,7 @@ from pypy.module.thread import gil MixedModule.__init__(self, space, *args) prev_ec = space.threadlocals.get_ec() - space.threadlocals = gil.GILThreadLocals() + space.threadlocals = gil.GILThreadLocals(space) space.threadlocals.initialize(space) if prev_ec is not None: space.threadlocals._set_ec(prev_ec) diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -65,7 +65,7 @@ except Exception, e: assert 0 thread.gc_thread_die() - my_gil_threadlocals = gil.GILThreadLocals() + my_gil_threadlocals = gil.GILThreadLocals(space) def f(): state.data = [] state.datalen1 = 0 diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,5 +1,7 @@ -from rpython.rlib import rthread +import weakref +from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,15 +15,51 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - def __init__(self): + def __init__(self, space): "NOT_RPYTHON" - self._valuedict = {} # {thread_ident: ExecutionContext()} + # + # This object tracks code that enters and leaves threads. + # There are two APIs. For Python-level threads, we know when + # the thread starts and ends, and we call enter_thread() and + # leave_thread(). In a few other cases, like callbacks, we + # might be running in some never-seen-before thread: in this + # case, the callback logic needs to call try_enter_thread() at + # the start, and if this returns True it needs to call + # leave_thread() at the end. + # + # We implement an optimization for the second case (which only + # works if we translate with a framework GC and with + # rweakref). If try_enter_thread() is called in a + # never-seen-before thread, it still returns False and + # remembers the ExecutionContext with 'self._weaklist'. The + # next time we call try_enter_thread() again in the same + # thread, the ExecutionContext is reused. The optimization is + # not completely invisible to the user: 'thread._local()' + # values will remain. We can argue that it is the correct + # behavior to do that, and the behavior we get if the + # optimization is disabled is buggy (but hard to do better + # then). + # + # 'self._valuedict' is a dict mapping the thread idents to + # ExecutionContexts; it does not list the ExecutionContexts + # which are in 'self._weaklist'. (The latter is more precisely + # a list of AutoFreeECWrapper objects, defined below, which + # each references the ExecutionContext.) + # + self.space = space + self._valuedict = {} self._cleanup_() self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, loop_invariant=True) + def can_optimize_with_weaklist(self): + config = self.space.config + return (config.translation.rweakref and + rthread.ThreadLocalReference.automatic_keepalive(config)) + def _cleanup_(self): self._valuedict.clear() + self._weaklist = None self._mainthreadident = 0 def enter_thread(self, space): @@ -29,19 +67,35 @@ self._set_ec(space.createexecutioncontext()) def try_enter_thread(self, space): - if rthread.get_ident() in self._valuedict: + # common case: the thread-local has already got a value + if self.raw_thread_local.get() is not None: return False - self.enter_thread(space) - return True - def _set_ec(self, ec): + # Else, make and attach a new ExecutionContext + ec = space.createexecutioncontext() + if not self.can_optimize_with_weaklist(): + self._set_ec(ec) + return True + + # If can_optimize_with_weaklist(), then 'rthread' keeps the + # thread-local values alive until the end of the thread. Use + # AutoFreeECWrapper as an object with a __del__; when this + # __del__ is called, it means the thread was really finished. + # In this case we don't want leave_thread() to be called + # explicitly, so we return False. + if self._weaklist is None: + self._weaklist = ListECWrappers() + self._weaklist.append(weakref.ref(AutoFreeECWrapper(ec))) + self._set_ec(ec, register_in_valuedict=False) + return False + + def _set_ec(self, ec, register_in_valuedict=True): ident = rthread.get_ident() if self._mainthreadident == 0 or self._mainthreadident == ident: ec._signals_enabled = 1 # the main thread is enabled self._mainthreadident = ident - self._valuedict[ident] = ec - # This logic relies on hacks and _make_sure_does_not_move(). - # It only works because we keep the 'ec' alive in '_valuedict' too. + if register_in_valuedict: + self._valuedict[ident] = ec self.raw_thread_local.set(ec) def leave_thread(self, space): @@ -84,7 +138,23 @@ ec._signals_enabled = new def getallvalues(self): - return self._valuedict + if self._weaklist is None: + return self._valuedict + # This logic walks the 'self._weaklist' list and adds the + # ExecutionContexts to 'result'. We are careful in case there + # are two AutoFreeECWrappers in the list which have the same + # 'ident'; in this case we must keep the most recent one (the + # older one should be deleted soon). Moreover, entries in + # self._valuedict have priority because they are never + # outdated. + result = {} + for h in self._weaklist.items(): + wrapper = h() + if wrapper is not None and not wrapper.deleted: + result[wrapper.ident] = wrapper.ec + # ^^ this possibly overwrites an older ec + result.update(self._valuedict) + return result def reinit_threads(self, space): "Called in the child process after a fork()" @@ -94,7 +164,31 @@ old_sig = ec._signals_enabled if ident != self._mainthreadident: old_sig += 1 - self._cleanup_() + self._cleanup_() # clears self._valuedict self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig + + +class AutoFreeECWrapper(object): + deleted = False + + def __init__(self, ec): + # this makes a loop between 'self' and 'ec'. It should not prevent + # the __del__ method here from being called. + self.ec = ec + ec._threadlocals_auto_free = self + self.ident = rthread.get_ident() + + def __del__(self): + from pypy.module.thread.os_local import thread_is_stopping + # this is always called in another thread: the thread + # referenced by 'self.ec' has finished at that point, and + # we're just after the GC which finds no more references to + # 'ec' (and thus to 'self'). + self.deleted = True + thread_is_stopping(self.ec) + +class ListECWrappers(rshrinklist.AbstractShrinkList): + def must_keep(self, wref): + return wref() is not None diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -1,4 +1,4 @@ -secondary_entrypoints = {} +secondary_entrypoints = {"main": []} import py from rpython.rtyper.lltypesystem import lltype, rffi @@ -109,17 +109,3 @@ "you. Another difference is that entrypoint_highlevel() " "returns the normal Python function, which can be safely " "called from more Python code.") - - -# the point of dance below is so the call to rpython_startup_code actually -# does call asm_stack_bottom. It's here because there is no other good place. -# This thing is imported by any target which has any API, so it'll get -# registered - -RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True, - random_effects_on_gcobjs=True) - - at entrypoint_highlevel('main', [], c_name='rpython_startup_code') -def rpython_startup_code(): - RPython_StartupCode() diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -335,6 +335,25 @@ # XXX this can be made more efficient in the future return bytearray(str(i)) +def fetch_translated_config(): + """Returns the config that is current when translating. + Returns None if not translated. + """ + return None + +class Entry(ExtRegistryEntry): + _about_ = fetch_translated_config + + def compute_result_annotation(self): + config = self.bookkeeper.annotator.translator.config + return self.bookkeeper.immutablevalue(config) + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + translator = hop.rtyper.annotator.translator + hop.exception_cannot_occur() + return hop.inputconst(lltype.Void, translator.config) + # ____________________________________________________________ class FREED_OBJECT(object): diff --git a/rpython/rlib/rshrinklist.py b/rpython/rlib/rshrinklist.py --- a/rpython/rlib/rshrinklist.py +++ b/rpython/rlib/rshrinklist.py @@ -6,6 +6,8 @@ The twist is that occasionally append() will throw away the items for which must_keep() returns False. (It does so without changing the order.) + + See also rpython.rlib.rweaklist. """ _mixin_ = True diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -291,8 +291,6 @@ # ____________________________________________________________ # # Thread-locals. -# KEEP THE REFERENCE ALIVE, THE GC DOES NOT FOLLOW THEM SO FAR! -# We use _make_sure_does_not_move() to make sure the pointer will not move. class ThreadLocalField(object): @@ -351,6 +349,11 @@ class ThreadLocalReference(ThreadLocalField): + # A thread-local that points to an object. The object stored in such + # a thread-local is kept alive as long as the thread is not finished + # (but only with our own GCs! it seems not to work with Boehm...) + # (also, on Windows, if you're not making a DLL but an EXE, it will + # leak the objects when a thread finishes; see threadlocal.c.) _COUNT = 1 def __init__(self, Cls, loop_invariant=False): @@ -378,20 +381,41 @@ assert isinstance(value, Cls) or value is None if we_are_translated(): from rpython.rtyper.annlowlevel import cast_instance_to_gcref - from rpython.rlib.rgc import _make_sure_does_not_move - from rpython.rlib.objectmodel import running_on_llinterp gcref = cast_instance_to_gcref(value) - if not running_on_llinterp: - if gcref: - _make_sure_does_not_move(gcref) value = lltype.cast_ptr_to_int(gcref) setraw(value) + rgc.register_custom_trace_hook(TRACETLREF, _lambda_trace_tlref) + rgc.ll_writebarrier(_tracetlref_obj) else: self.local.value = value self.get = get self.set = set + def _trace_tlref(gc, obj, callback, arg): + p = llmemory.NULL + llop.threadlocalref_acquire(lltype.Void) + while True: + p = llop.threadlocalref_enum(llmemory.Address, p) + if not p: + break + gc._trace_callback(callback, arg, p + offset) + llop.threadlocalref_release(lltype.Void) + _lambda_trace_tlref = lambda: _trace_tlref + TRACETLREF = lltype.GcStruct('TRACETLREF') + _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) + + @staticmethod + def automatic_keepalive(config): + """Returns True if translated with a GC that keeps alive + the set() value until the end of the thread. Returns False + if you need to keep it alive yourself (but in that case, you + should also reset it to None before the thread finishes). + """ + return (config.translation.gctransformer == "framework" and + # see translator/c/src/threadlocal.c for the following line + (not _win32 or config.translation.shared)) + tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", loop_invariant=True) @@ -399,7 +423,8 @@ loop_invariant=True) tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") tlfield_alt_errno = ThreadLocalField(rffi.INT, "alt_errno") -if sys.platform == "win32": +_win32 = (sys.platform == "win32") +if _win32: from rpython.rlib import rwin32 tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") tlfield_alt_lasterror = ThreadLocalField(rwin32.DWORD, "alt_lasterror") diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py --- a/rpython/rlib/rweaklist.py +++ b/rpython/rlib/rweaklist.py @@ -5,6 +5,13 @@ class RWeakListMixin(object): + """A mixin base class. A collection that weakly maps indexes to objects. + After an object goes away, its index is marked free and will be reused + by some following add_handle() call. So add_handle() might not append + the object at the end of the list, but can put it anywhere. + + See also rpython.rlib.rshrinklist. + """ _mixin_ = True def initialize(self): diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -6,7 +6,8 @@ prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, - setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin) + setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin, + fetch_translated_config) from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.test.test_llinterp import interpret @@ -439,6 +440,13 @@ res = self.interpret(f, [42]) assert res == 84 + def test_fetch_translated_config(self): + assert fetch_translated_config() is None + def f(): + return fetch_translated_config().translation.continuation + res = self.interpret(f, []) + assert res is False + def test_specialize_decorator(): def f(): diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -1,6 +1,7 @@ import gc, time from rpython.rlib.rthread import * from rpython.rlib.rarithmetic import r_longlong +from rpython.rlib import objectmodel from rpython.translator.c.test.test_boehm import AbstractGCTestClass from rpython.rtyper.lltypesystem import lltype, rffi import py @@ -240,3 +241,60 @@ class TestUsingFramework(AbstractThreadTests): gcpolicy = 'minimark' + + def test_tlref_keepalive(self, no__thread=True): + import weakref + from rpython.config.translationoption import SUPPORT__THREAD + + if not (SUPPORT__THREAD or no__thread): + py.test.skip("no __thread support here") + + class FooBar(object): + pass + t = ThreadLocalReference(FooBar) + + def tset(): + x1 = FooBar() + t.set(x1) + return weakref.ref(x1) + tset._dont_inline_ = True + + class WrFromThread: + pass + wr_from_thread = WrFromThread() + + def f(): + config = objectmodel.fetch_translated_config() + assert t.automatic_keepalive(config) is True + wr = tset() + import gc; gc.collect() # 'x1' should not be collected + x2 = t.get() + assert x2 is not None + assert wr() is not None + assert wr() is x2 + return wr + + def thread_entry_point(): + wr = f() + wr_from_thread.wr = wr + wr_from_thread.seen = True + + def main(): + wr_from_thread.seen = False + start_new_thread(thread_entry_point, ()) + wr1 = f() + time.sleep(0.5) + assert wr_from_thread.seen is True + wr2 = wr_from_thread.wr + import gc; gc.collect() # wr2() should be collected here + assert wr1() is not None # this thread, still running + assert wr2() is None # other thread, not running any more + return 42 + + extra_options = {'no__thread': no__thread, 'shared': True} + fn = self.getcompiled(main, [], extra_options=extra_options) + res = fn() + assert res == 42 + + def test_tlref_keepalive__thread(self): + self.test_tlref_keepalive(no__thread=False) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -950,6 +950,13 @@ return self.op_raw_load(RESTYPE, _address_of_thread_local(), offset) op_threadlocalref_get.need_result_type = True + def op_threadlocalref_acquire(self, prev): + raise NotImplementedError + def op_threadlocalref_release(self, prev): + raise NotImplementedError + def op_threadlocalref_enum(self, prev): + raise NotImplementedError + # __________________________________________________________ # operations on addresses diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -545,8 +545,11 @@ 'getslice': LLOp(canraise=(Exception,)), 'check_and_clear_exc': LLOp(), - 'threadlocalref_addr': LLOp(sideeffects=False), # get (or make) addr of tl + 'threadlocalref_addr': LLOp(), # get (or make) addr of tl 'threadlocalref_get': LLOp(sideeffects=False), # read field (no check) + 'threadlocalref_acquire': LLOp(), # lock for enum + 'threadlocalref_release': LLOp(), # lock for enum + 'threadlocalref_enum': LLOp(sideeffects=False), # enum all threadlocalrefs # __________ debugging __________ 'debug_view': LLOp(), diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -733,6 +733,9 @@ print >> f, 'struct pypy_threadlocal_s {' print >> f, '\tint ready;' print >> f, '\tchar *stack_end;' + print >> f, '\tstruct pypy_threadlocal_s *prev, *next;' + # note: if the four fixed fields above are changed, you need + # to adapt threadlocal.c's linkedlist_head declaration too for field in fields: typename = database.gettype(field.FIELDTYPE) print >> f, '\t%s;' % cdecl(typename, field.fieldname) diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,24 @@ # include #endif +void rpython_startup_code(void) +{ +#ifdef RPY_WITH_GIL + RPyGilAcquire(); +#endif +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; +#endif + pypy_asm_stack_bottom(); + RPython_StartupCode(); +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter--; +#endif +#ifdef RPY_WITH_GIL + RPyGilRelease(); +#endif +} + RPY_EXTERN int pypy_main_function(int argc, char *argv[]) diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h --- a/rpython/translator/c/src/thread.h +++ b/rpython/translator/c/src/thread.h @@ -48,7 +48,7 @@ } static inline void _RPyGilRelease(void) { assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); - rpy_fastgil = 0; + lock_release(&rpy_fastgil); } static inline long *_RPyFetchFastGil(void) { return &rpy_fastgil; diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -231,10 +231,19 @@ return (result != WAIT_TIMEOUT); } -#define mutex1_t mutex2_t -#define mutex1_init mutex2_init -#define mutex1_lock mutex2_lock -#define mutex1_unlock mutex2_unlock +typedef CRITICAL_SECTION mutex1_t; + +static inline void mutex1_init(mutex1_t *mutex) { + InitializeCriticalSection(mutex); +} + +static inline void mutex1_lock(mutex1_t *mutex) { + EnterCriticalSection(mutex); +} + +static inline void mutex1_unlock(mutex1_t *mutex) { + LeaveCriticalSection(mutex); +} //#define lock_test_and_set(ptr, value) see thread_nt.h #define atomic_increment(ptr) InterlockedIncrement(ptr) diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -38,3 +38,4 @@ #else #define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) #endif +#define lock_release(ptr) (*((volatile long *)ptr) = 0) diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h --- a/rpython/translator/c/src/thread_pthread.h +++ b/rpython/translator/c/src/thread_pthread.h @@ -81,3 +81,4 @@ #define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) +#define lock_release(ptr) __sync_lock_release(ptr) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -3,20 +3,99 @@ #include #include #include -#ifndef _WIN32 -# include +#include +#include "src/threadlocal.h" +#include "src/thread.h" + + +/* this is a spin-lock that must be acquired around each doubly-linked-list + manipulation (because such manipulations can occur without the GIL) */ +static long pypy_threadlocal_lock = 0; + +static int check_valid(void); + +void _RPython_ThreadLocals_Acquire(void) { + while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { + /* busy loop */ + } + assert(check_valid()); +} +void _RPython_ThreadLocals_Release(void) { + assert(check_valid()); + lock_release(&pypy_threadlocal_lock); +} + + +pthread_key_t pypy_threadlocal_key +#ifdef _WIN32 += TLS_OUT_OF_INDEXES #endif -#include "src/threadlocal.h" +; +static struct pypy_threadlocal_s linkedlist_head = { + -1, /* ready */ + NULL, /* stack_end */ + &linkedlist_head, /* prev */ + &linkedlist_head }; /* next */ + +static int check_valid(void) +{ + struct pypy_threadlocal_s *prev, *cur; + prev = &linkedlist_head; + while (1) { + cur = prev->next; + assert(cur->prev == prev); + if (cur == &linkedlist_head) + break; + assert(cur->ready == 42); + assert(cur->next != cur); + prev = cur; + } + assert(cur->ready == -1); + return 1; +} + +static void cleanup_after_fork(void) +{ + /* assume that at most one pypy_threadlocal_s survived, the current one */ + struct pypy_threadlocal_s *cur; +#ifdef USE___THREAD + cur = &pypy_threadlocal; +#else + cur = (struct pypy_threadlocal_s *)_RPy_ThreadLocals_Get(); +#endif + if (cur && cur->ready == 42) { + cur->next = cur->prev = &linkedlist_head; + linkedlist_head.next = linkedlist_head.prev = cur; + } + else { + linkedlist_head.next = linkedlist_head.prev = &linkedlist_head; + } + _RPython_ThreadLocals_Release(); +} + + +struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev) +{ + if (prev == NULL) + prev = &linkedlist_head; + if (prev->next == &linkedlist_head) + return NULL; + return prev->next; +} static void _RPy_ThreadLocals_Init(void *p) { + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + struct pypy_threadlocal_s *oldnext; memset(p, 0, sizeof(struct pypy_threadlocal_s)); + #ifdef RPY_TLOFS_p_errno - ((struct pypy_threadlocal_s *)p)->p_errno = &errno; + tls->p_errno = &errno; #endif #ifdef RPY_TLOFS_thread_ident - ((struct pypy_threadlocal_s *)p)->thread_ident = + tls->thread_ident = # ifdef _WIN32 GetCurrentThreadId(); # else @@ -26,58 +105,80 @@ where it is not the case are rather old nowadays. */ # endif #endif - ((struct pypy_threadlocal_s *)p)->ready = 42; + _RPython_ThreadLocals_Acquire(); + oldnext = linkedlist_head.next; + tls->prev = &linkedlist_head; + tls->next = oldnext; + linkedlist_head.next = tls; + oldnext->prev = tls; + tls->ready = 42; + _RPython_ThreadLocals_Release(); } +static void threadloc_unlink(void *p) +{ + /* warning: this can be called at completely random times without + the GIL. */ + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + _RPython_ThreadLocals_Acquire(); + if (tls->ready == 42) { + tls->next->prev = tls->prev; + tls->prev->next = tls->next; + memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + tls->ready = 0; + } + _RPython_ThreadLocals_Release(); +#ifndef USE___THREAD + free(p); +#endif +} -/* ------------------------------------------------------------ */ -#ifdef USE___THREAD -/* ------------------------------------------------------------ */ +#ifdef _WIN32 +/* xxx Defines a DllMain() function. It's horrible imho: it only + works if we happen to compile a DLL (not a EXE); and of course you + get link-time errors if two files in the same DLL do the same. + There are some alternatives known, but they are horrible in other + ways (e.g. using undocumented behavior). This seems to be the + simplest, but feel free to fix if you need that. - -/* in this situation, we always have one full 'struct pypy_threadlocal_s' - available, managed by gcc. */ -__thread struct pypy_threadlocal_s pypy_threadlocal; + For this reason we have the line 'not _win32 or config.translation.shared' + in rpython.rlib.rthread. +*/ +BOOL WINAPI DllMain(HINSTANCE hinstDLL, + DWORD reason_for_call, + LPVOID reserved) +{ + LPVOID p; + switch (reason_for_call) { + case DLL_THREAD_DETACH: + if (pypy_threadlocal_key != TLS_OUT_OF_INDEXES) { + p = TlsGetValue(pypy_threadlocal_key); + if (p != NULL) { + TlsSetValue(pypy_threadlocal_key, NULL); + threadloc_unlink(p); + } + } + break; + default: + break; + } + return TRUE; +} +#endif void RPython_ThreadLocals_ProgramInit(void) { - _RPy_ThreadLocals_Init(&pypy_threadlocal); -} - -char *_RPython_ThreadLocals_Build(void) -{ - RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); - _RPy_ThreadLocals_Init(&pypy_threadlocal); - return (char *)&pypy_threadlocal; -} - -void RPython_ThreadLocals_ThreadDie(void) -{ - memset(&pypy_threadlocal, 0xDD, - sizeof(struct pypy_threadlocal_s)); /* debug */ - pypy_threadlocal.ready = 0; -} - - -/* ------------------------------------------------------------ */ -#else -/* ------------------------------------------------------------ */ - - -/* this is the case where the 'struct pypy_threadlocal_s' is allocated - explicitly, with malloc()/free(), and attached to (a single) thread- - local key using the API of Windows or pthread. */ - -pthread_key_t pypy_threadlocal_key; - - -void RPython_ThreadLocals_ProgramInit(void) -{ + /* Initialize the pypy_threadlocal_key, together with a destructor + that will be called every time a thread shuts down (if there is + a non-null thread-local value). This is needed even in the + case where we use '__thread' below, for the destructor. + */ + assert(pypy_threadlocal_lock == 0); #ifdef _WIN32 pypy_threadlocal_key = TlsAlloc(); if (pypy_threadlocal_key == TLS_OUT_OF_INDEXES) #else - if (pthread_key_create(&pypy_threadlocal_key, NULL) != 0) + if (pthread_key_create(&pypy_threadlocal_key, threadloc_unlink) != 0) #endif { fprintf(stderr, "Internal RPython error: " @@ -85,8 +186,53 @@ abort(); } _RPython_ThreadLocals_Build(); + +#ifndef _WIN32 + pthread_atfork(_RPython_ThreadLocals_Acquire, + _RPython_ThreadLocals_Release, + cleanup_after_fork); +#endif } + +/* ------------------------------------------------------------ */ +#ifdef USE___THREAD +/* ------------------------------------------------------------ */ + + +/* in this situation, we always have one full 'struct pypy_threadlocal_s' + available, managed by gcc. */ +__thread struct pypy_threadlocal_s pypy_threadlocal; + +char *_RPython_ThreadLocals_Build(void) +{ + RPyAssert(pypy_threadlocal.ready == 0, "unclean thread-local"); + _RPy_ThreadLocals_Init(&pypy_threadlocal); + + /* we also set up &pypy_threadlocal as a POSIX thread-local variable, + because we need the destructor behavior. */ + pthread_setspecific(pypy_threadlocal_key, (void *)&pypy_threadlocal); + + return (char *)&pypy_threadlocal; +} + +void RPython_ThreadLocals_ThreadDie(void) +{ + pthread_setspecific(pypy_threadlocal_key, NULL); + threadloc_unlink(&pypy_threadlocal); +} + + +/* ------------------------------------------------------------ */ +#else +/* ------------------------------------------------------------ */ + + +/* this is the case where the 'struct pypy_threadlocal_s' is allocated + explicitly, with malloc()/free(), and attached to (a single) thread- + local key using the API of Windows or pthread. */ + + char *_RPython_ThreadLocals_Build(void) { void *p = malloc(sizeof(struct pypy_threadlocal_s)); @@ -105,8 +251,7 @@ void *p = _RPy_ThreadLocals_Get(); if (p != NULL) { _RPy_ThreadLocals_Set(NULL); - memset(p, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ - free(p); + threadloc_unlink(p); /* includes free(p) */ } } diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -13,14 +13,24 @@ to die. */ RPY_EXTERN void RPython_ThreadLocals_ThreadDie(void); -/* There are two llops: 'threadlocalref_addr' and 'threadlocalref_make'. - They both return the address of the thread-local structure (of the - C type 'struct pypy_threadlocal_s'). The difference is that - OP_THREADLOCALREF_MAKE() checks if we have initialized this thread- - local structure in the current thread, and if not, calls the following - helper. */ +/* 'threadlocalref_addr' returns the address of the thread-local + structure (of the C type 'struct pypy_threadlocal_s'). It first + checks if we have initialized this thread-local structure in the + current thread, and if not, calls the following helper. */ RPY_EXTERN char *_RPython_ThreadLocals_Build(void); +RPY_EXTERN void _RPython_ThreadLocals_Acquire(void); +RPY_EXTERN void _RPython_ThreadLocals_Release(void); + +/* Must acquire/release the thread-local lock around a series of calls + to the following function */ +RPY_EXTERN struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev); + +#define OP_THREADLOCALREF_ACQUIRE(r) _RPython_ThreadLocals_Acquire() +#define OP_THREADLOCALREF_RELEASE(r) _RPython_ThreadLocals_Release() +#define OP_THREADLOCALREF_ENUM(p, r) r = _RPython_ThreadLocals_Enum(p) + /* ------------------------------------------------------------ */ #ifdef USE___THREAD @@ -29,6 +39,8 @@ /* Use the '__thread' specifier, so far only on Linux */ +#include + RPY_EXTERN __thread struct pypy_threadlocal_s pypy_threadlocal; #define OP_THREADLOCALREF_ADDR(r) \ @@ -64,8 +76,6 @@ # define _RPy_ThreadLocals_Set(x) pthread_setspecific(pypy_threadlocal_key, x) #endif -RPY_EXTERN pthread_key_t pypy_threadlocal_key; - #define OP_THREADLOCALREF_ADDR(r) \ do { \ @@ -87,6 +97,9 @@ /* ------------------------------------------------------------ */ +RPY_EXTERN pthread_key_t pypy_threadlocal_key; + + /* only for the fall-back path in the JIT */ #define OP_THREADLOCALREF_GET_NONCONST(RESTYPE, offset, r) \ do { \ diff --git a/rpython/translator/c/test/test_boehm.py b/rpython/translator/c/test/test_boehm.py --- a/rpython/translator/c/test/test_boehm.py +++ b/rpython/translator/c/test/test_boehm.py @@ -23,6 +23,7 @@ class AbstractGCTestClass(object): gcpolicy = "boehm" use_threads = False + extra_options = {} # deal with cleanups def setup_method(self, meth): @@ -33,8 +34,10 @@ #print "CLEANUP" self._cleanups.pop()() - def getcompiled(self, func, argstypelist=[], annotatorpolicy=None): - return compile(func, argstypelist, gcpolicy=self.gcpolicy, thread=self.use_threads) + def getcompiled(self, func, argstypelist=[], annotatorpolicy=None, + extra_options={}): + return compile(func, argstypelist, gcpolicy=self.gcpolicy, + thread=self.use_threads, **extra_options) class TestUsingBoehm(AbstractGCTestClass): diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -96,6 +96,8 @@ continue if name == 'pypy_debug_file': # ok to export this one continue + if name == 'rpython_startup_code': # ok for this one too + continue if 'pypy' in name.lower() or 'rpy' in name.lower(): raise Exception("Unexpected exported name %r. " "What is likely missing is RPY_EXTERN before the " diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -203,9 +203,8 @@ try: points = secondary_entrypoints[key] except KeyError: - raise KeyError( - "Entrypoints not found. I only know the keys %r." % - (", ".join(secondary_entrypoints.keys()), )) + raise KeyError("Entrypoint %r not found (not in %r)" % + (key, secondary_entrypoints.keys())) self.secondary_entrypoints.extend(points) self.translator.driver_instrument_result = self.instrument_result From pypy.commits at gmail.com Tue Jan 5 09:01:35 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 06:01:35 -0800 (PST) Subject: [pypy-commit] pypy default: Document the branch Message-ID: <568bccbf.520e1c0a.322f7.3982@mx.google.com> Author: Armin Rigo Branch: Changeset: r81583:e98964a22151 Date: 2016-01-05 15:00 +0100 http://bitbucket.org/pypy/pypy/changeset/e98964a22151/ Log: Document the branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -103,3 +103,9 @@ Fix the cryptic exception message when attempting to use extended slicing in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) From pypy.commits at gmail.com Tue Jan 5 09:05:00 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 06:05:00 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: hg merge default (including the ec-keepalive branch) Message-ID: <568bcd8c.9a6f1c0a.7b609.3545@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81585:7b81fa1c3fa9 Date: 2016-01-05 15:01 +0100 http://bitbucket.org/pypy/pypy/changeset/7b81fa1c3fa9/ Log: hg merge default (including the ec-keepalive branch) diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -559,6 +559,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +593,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -103,3 +103,9 @@ Fix the cryptic exception message when attempting to use extended slicing in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -89,6 +89,7 @@ 'set_code_callback' : 'interp_magic.set_code_callback', 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', + 'decode_long' : 'interp_magic.decode_long', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pycode import CodeHookCache from pypy.interpreter.pyframe import PyFrame @@ -158,4 +158,13 @@ if space.is_none(w_callable): cache._code_hook = None else: - cache._code_hook = w_callable \ No newline at end of file + cache._code_hook = w_callable + + at unwrap_spec(string=str, byteorder=str, signed=int) +def decode_long(space, string, byteorder='little', signed=1): + from rpython.rlib.rbigint import rbigint, InvalidEndiannessError + try: + result = rbigint.frombytes(string, byteorder, bool(signed)) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, "invalid byteorder argument") + return space.newlong_from_rbigint(result) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -30,4 +30,20 @@ """ in d finally: __pypy__.set_code_callback(None) - assert d['f'].__code__ in l \ No newline at end of file + assert d['f'].__code__ in l + + def test_decode_long(self): + from __pypy__ import decode_long + assert decode_long('') == 0 + assert decode_long('\xff\x00') == 255 + assert decode_long('\xff\x7f') == 32767 + assert decode_long('\x00\xff') == -256 + assert decode_long('\x00\x80') == -32768 + assert decode_long('\x80') == -128 + assert decode_long('\x7f') == 127 + assert decode_long('\x55' * 97) == (1 << (97 * 8)) // 3 + assert decode_long('\x00\x80', 'big') == 128 + assert decode_long('\xff\x7f', 'little', False) == 32767 + assert decode_long('\x00\x80', 'little', False) == 32768 + assert decode_long('\x00\x80', 'little', True) == -32768 + raises(ValueError, decode_long, '', 'foo') diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -299,7 +299,7 @@ return build_stat_result(space, st) def lstat(space, w_path): - "Like stat(path), but do no follow symbolic links." + "Like stat(path), but do not follow symbolic links." try: st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -358,3 +358,10 @@ assert 3L.__coerce__(4L) == (3L, 4L) assert 3L.__coerce__(4) == (3, 4) assert 3L.__coerce__(object()) == NotImplemented + + def test_linear_long_base_16(self): + # never finishes if long(_, 16) is not linear-time + size = 100000 + n = "a" * size + expected = (2 << (size * 4)) // 3 + assert long(n, 16) == expected diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2794,8 +2794,10 @@ def parse_digit_string(parser): # helper for fromstr + base = parser.base + if (base & (base - 1)) == 0: + return parse_string_from_binary_base(parser) a = rbigint() - base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 while True: @@ -2811,3 +2813,52 @@ tens *= base a.sign *= parser.sign return a + +def parse_string_from_binary_base(parser): + # The point to this routine is that it takes time linear in the number of + # string characters. + from rpython.rlib.rstring import ParseStringError + + base = parser.base + if base == 2: bits_per_char = 1 + elif base == 4: bits_per_char = 2 + elif base == 8: bits_per_char = 3 + elif base == 16: bits_per_char = 4 + elif base == 32: bits_per_char = 5 + else: + raise AssertionError + + # n <- total number of bits needed, while moving 'parser' to the end + n = 0 + while parser.next_digit() >= 0: + n += 1 + + # b <- number of Python digits needed, = ceiling(n/SHIFT). */ + try: + b = ovfcheck(n * bits_per_char) + b = ovfcheck(b + (SHIFT - 1)) + except OverflowError: + raise ParseStringError("long string too large to convert") + b = (b // SHIFT) or 1 + z = rbigint([NULLDIGIT] * b, sign=parser.sign) + + # Read string from right, and fill in long from left; i.e., + # from least to most significant in both. + accum = _widen_digit(0) + bits_in_accum = 0 + pdigit = 0 + for _ in range(n): + k = parser.prev_digit() + accum |= _widen_digit(k) << bits_in_accum + bits_in_accum += bits_per_char + if bits_in_accum >= SHIFT: + z.setdigit(pdigit, accum) + pdigit += 1 + assert pdigit <= b + accum >>= SHIFT + bits_in_accum -= SHIFT + + if bits_in_accum: + z.setdigit(pdigit, accum) + z._normalize() + return z diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -485,6 +485,24 @@ else: return -1 + def prev_digit(self): + # After exhausting all n digits in next_digit(), you can walk them + # again in reverse order by calling prev_digit() exactly n times + i = self.i - 1 + assert i >= 0 + self.i = i + c = self.s[i] + digit = ord(c) + if '0' <= c <= '9': + digit -= ord('0') + elif 'A' <= c <= 'Z': + digit = (digit - ord('A')) + 10 + elif 'a' <= c <= 'z': + digit = (digit - ord('a')) + 10 + else: + raise AssertionError + return digit + # -------------- public API --------------------------------- INIT_SIZE = 100 # XXX tweak diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -394,11 +394,13 @@ def _trace_tlref(gc, obj, callback, arg): p = llmemory.NULL + llop.threadlocalref_acquire(lltype.Void) while True: p = llop.threadlocalref_enum(llmemory.Address, p) if not p: break gc._trace_callback(callback, arg, p + offset) + llop.threadlocalref_release(lltype.Void) _lambda_trace_tlref = lambda: _trace_tlref TRACETLREF = lltype.GcStruct('TRACETLREF') _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) @@ -407,9 +409,12 @@ def automatic_keepalive(config): """Returns True if translated with a GC that keeps alive the set() value until the end of the thread. Returns False - if you need to keep it alive yourself. + if you need to keep it alive yourself (but in that case, you + should also reset it to None before the thread finishes). """ - return config.translation.gctransformer == "framework" + return (config.translation.gctransformer == "framework" and + # see translator/c/src/threadlocal.c for the following line + (not _win32 or config.translation.shared)) tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", @@ -418,7 +423,8 @@ loop_invariant=True) tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") tlfield_alt_errno = ThreadLocalField(rffi.INT, "alt_errno") -if sys.platform == "win32": +_win32 = (sys.platform == "win32") +if _win32: from rpython.rlib import rwin32 tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") tlfield_alt_lasterror = ThreadLocalField(rwin32.DWORD, "alt_lasterror") diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -825,7 +825,19 @@ def __init__(self, base, sign, digits): self.base = base self.sign = sign - self.next_digit = iter(digits + [-1]).next + self.i = 0 + self._digits = digits + def next_digit(self): + i = self.i + if i == len(self._digits): + return -1 + self.i = i + 1 + return self._digits[i] + def prev_digit(self): + i = self.i - 1 + assert i >= 0 + self.i = i + return self._digits[i] x = parse_digit_string(Parser(10, 1, [6])) assert x.eq(rbigint.fromint(6)) x = parse_digit_string(Parser(10, 1, [6, 2, 3])) @@ -847,6 +859,16 @@ x = parse_digit_string(Parser(7, -1, [0, 0, 0])) assert x.tobool() is False + for base in [2, 4, 8, 16, 32]: + for inp in [[0], [1], [1, 0], [0, 1], [1, 0, 1], [1, 0, 0, 1], + [1, 0, 0, base-1, 0, 1], [base-1, 1, 0, 0, 0, 1, 0], + [base-1]]: + inp = inp * 97 + x = parse_digit_string(Parser(base, -1, inp)) + num = sum(inp[i] * (base ** (len(inp)-1-i)) + for i in range(len(inp))) + assert x.eq(rbigint.fromlong(-num)) + BASE = 2 ** SHIFT diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -950,6 +950,10 @@ return self.op_raw_load(RESTYPE, _address_of_thread_local(), offset) op_threadlocalref_get.need_result_type = True + def op_threadlocalref_acquire(self, prev): + raise NotImplementedError + def op_threadlocalref_release(self, prev): + raise NotImplementedError def op_threadlocalref_enum(self, prev): raise NotImplementedError diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -547,6 +547,8 @@ 'threadlocalref_addr': LLOp(), # get (or make) addr of tl 'threadlocalref_get': LLOp(sideeffects=False), # read field (no check) + 'threadlocalref_acquire': LLOp(), # lock for enum + 'threadlocalref_release': LLOp(), # lock for enum 'threadlocalref_enum': LLOp(sideeffects=False), # enum all threadlocalrefs # __________ debugging __________ diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h --- a/rpython/translator/c/src/thread.h +++ b/rpython/translator/c/src/thread.h @@ -48,7 +48,7 @@ } static inline void _RPyGilRelease(void) { assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); - rpy_fastgil = 0; + lock_release(&rpy_fastgil); } static inline long *_RPyFetchFastGil(void) { return &rpy_fastgil; diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -231,10 +231,19 @@ return (result != WAIT_TIMEOUT); } -#define mutex1_t mutex2_t -#define mutex1_init mutex2_init -#define mutex1_lock mutex2_lock -#define mutex1_unlock mutex2_unlock +typedef CRITICAL_SECTION mutex1_t; + +static inline void mutex1_init(mutex1_t *mutex) { + InitializeCriticalSection(mutex); +} + +static inline void mutex1_lock(mutex1_t *mutex) { + EnterCriticalSection(mutex); +} + +static inline void mutex1_unlock(mutex1_t *mutex) { + LeaveCriticalSection(mutex); +} //#define lock_test_and_set(ptr, value) see thread_nt.h #define atomic_increment(ptr) InterlockedIncrement(ptr) diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -38,3 +38,4 @@ #else #define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) #endif +#define lock_release(ptr) (*((volatile long *)ptr) = 0) diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h --- a/rpython/translator/c/src/thread_pthread.h +++ b/rpython/translator/c/src/thread_pthread.h @@ -81,3 +81,4 @@ #define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) +#define lock_release(ptr) __sync_lock_release(ptr) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -3,7 +3,27 @@ #include #include #include +#include #include "src/threadlocal.h" +#include "src/thread.h" + + +/* this is a spin-lock that must be acquired around each doubly-linked-list + manipulation (because such manipulations can occur without the GIL) */ +static long pypy_threadlocal_lock = 0; + +static int check_valid(void); + +void _RPython_ThreadLocals_Acquire(void) { + while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { + /* busy loop */ + } + assert(check_valid()); +} +void _RPython_ThreadLocals_Release(void) { + assert(check_valid()); + lock_release(&pypy_threadlocal_lock); +} pthread_key_t pypy_threadlocal_key @@ -18,6 +38,43 @@ &linkedlist_head, /* prev */ &linkedlist_head }; /* next */ +static int check_valid(void) +{ + struct pypy_threadlocal_s *prev, *cur; + prev = &linkedlist_head; + while (1) { + cur = prev->next; + assert(cur->prev == prev); + if (cur == &linkedlist_head) + break; + assert(cur->ready == 42); + assert(cur->next != cur); + prev = cur; + } + assert(cur->ready == -1); + return 1; +} + +static void cleanup_after_fork(void) +{ + /* assume that at most one pypy_threadlocal_s survived, the current one */ + struct pypy_threadlocal_s *cur; +#ifdef USE___THREAD + cur = &pypy_threadlocal; +#else + cur = (struct pypy_threadlocal_s *)_RPy_ThreadLocals_Get(); +#endif + if (cur && cur->ready == 42) { + cur->next = cur->prev = &linkedlist_head; + linkedlist_head.next = linkedlist_head.prev = cur; + } + else { + linkedlist_head.next = linkedlist_head.prev = &linkedlist_head; + } + _RPython_ThreadLocals_Release(); +} + + struct pypy_threadlocal_s * _RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev) { @@ -48,23 +105,29 @@ where it is not the case are rather old nowadays. */ # endif #endif + _RPython_ThreadLocals_Acquire(); oldnext = linkedlist_head.next; tls->prev = &linkedlist_head; tls->next = oldnext; linkedlist_head.next = tls; oldnext->prev = tls; tls->ready = 42; + _RPython_ThreadLocals_Release(); } static void threadloc_unlink(void *p) { + /* warning: this can be called at completely random times without + the GIL. */ struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + _RPython_ThreadLocals_Acquire(); if (tls->ready == 42) { - tls->ready = 0; tls->next->prev = tls->prev; tls->prev->next = tls->next; memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + tls->ready = 0; } + _RPython_ThreadLocals_Release(); #ifndef USE___THREAD free(p); #endif @@ -77,7 +140,10 @@ There are some alternatives known, but they are horrible in other ways (e.g. using undocumented behavior). This seems to be the simplest, but feel free to fix if you need that. - */ + + For this reason we have the line 'not _win32 or config.translation.shared' + in rpython.rlib.rthread. +*/ BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD reason_for_call, LPVOID reserved) @@ -107,6 +173,7 @@ a non-null thread-local value). This is needed even in the case where we use '__thread' below, for the destructor. */ + assert(pypy_threadlocal_lock == 0); #ifdef _WIN32 pypy_threadlocal_key = TlsAlloc(); if (pypy_threadlocal_key == TLS_OUT_OF_INDEXES) @@ -119,6 +186,12 @@ abort(); } _RPython_ThreadLocals_Build(); + +#ifndef _WIN32 + pthread_atfork(_RPython_ThreadLocals_Acquire, + _RPython_ThreadLocals_Release, + cleanup_after_fork); +#endif } @@ -133,7 +206,7 @@ char *_RPython_ThreadLocals_Build(void) { - RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); + RPyAssert(pypy_threadlocal.ready == 0, "unclean thread-local"); _RPy_ThreadLocals_Init(&pypy_threadlocal); /* we also set up &pypy_threadlocal as a POSIX thread-local variable, diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -19,11 +19,17 @@ current thread, and if not, calls the following helper. */ RPY_EXTERN char *_RPython_ThreadLocals_Build(void); +RPY_EXTERN void _RPython_ThreadLocals_Acquire(void); +RPY_EXTERN void _RPython_ThreadLocals_Release(void); + +/* Must acquire/release the thread-local lock around a series of calls + to the following function */ RPY_EXTERN struct pypy_threadlocal_s * _RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev); -#define OP_THREADLOCALREF_ENUM(p, r) \ - r = _RPython_ThreadLocals_Enum(p) +#define OP_THREADLOCALREF_ACQUIRE(r) _RPython_ThreadLocals_Acquire() +#define OP_THREADLOCALREF_RELEASE(r) _RPython_ThreadLocals_Release() +#define OP_THREADLOCALREF_ENUM(p, r) r = _RPython_ThreadLocals_Enum(p) /* ------------------------------------------------------------ */ From pypy.commits at gmail.com Tue Jan 5 09:05:02 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 05 Jan 2016 06:05:02 -0800 (PST) Subject: [pypy-commit] pypy default: Note Message-ID: <568bcd8e.a658c20a.49ee6.ffffccce@mx.google.com> Author: Armin Rigo Branch: Changeset: r81586:26ede037cdb3 Date: 2016-01-05 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/26ede037cdb3/ Log: Note diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -108,4 +108,5 @@ Optimize the case where, in a new C-created thread, we keep invoking short-running Python callbacks. (CFFI on CPython has a hack to achieve -the same result.) +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. From pypy.commits at gmail.com Tue Jan 5 09:28:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 06:28:56 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: do not use a register that might be allocated in copy_content! added comment to clarify. test_string is now passing Message-ID: <568bd328.0c2e1c0a.999da.3f5e@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81587:481b32d406e5 Date: 2016-01-05 15:28 +0100 http://bitbucket.org/pypy/pypy/changeset/481b32d406e5/ Log: do not use a register that might be allocated in copy_content! added comment to clarify. test_string is now passing diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -890,16 +890,19 @@ if src_ofs.is_imm(): value = src_ofs.value << scale if check_imm_value(value): - self.mc.LGR(dst, src_ptr) - self.mc.AGHI(dst, l.imm(value)) + if dst is not src_ptr: + self.mc.LGR(dst, src_ptr) + if value != 0: + self.mc.AGHI(dst, l.imm(value)) else: self.mc.load_imm(dst, value) self.mc.AGR(dst, src_ptr) elif scale == 0: - self.mc.LGR(dst, src_ptr) + if dst is not src_ptr: + self.mc.LGR(dst, src_ptr) self.mc.AGR(dst, src_ofs) else: - self.mc.SLAG(dst, src_ofs, l.addr(scale)) + self.mc.SLLG(dst, src_ofs, l.addr(scale)) self.mc.AGR(dst, src_ptr) def _emit_copycontent(self, arglocs, is_unicode): @@ -918,8 +921,11 @@ assert itemsize == 1 scale = 0 - self._emit_load_for_copycontent(r.r0, src_ptr_loc, src_ofs_loc, scale) - self._emit_load_for_copycontent(r.r2, dst_ptr_loc, dst_ofs_loc, scale) + self._emit_load_for_copycontent(r.SCRATCH, src_ptr_loc, src_ofs_loc, scale) + self._emit_load_for_copycontent(r.SCRATCH2, dst_ptr_loc, dst_ofs_loc, scale) + # + # DO NOT USE r2-r6 before this line! + # either of the parameter (e.g. str_ptr_loc, ...) locations might be allocated if length_loc.is_imm(): length = length_loc.getint() @@ -930,9 +936,12 @@ elif length_loc is not r.r4: self.mc.LGR(r.r4, length_loc) - self.mc.LGR(r.r3, r.r0) - self.mc.AGHI(r.r3, l.imm(basesize)) - self.mc.AGHI(r.r2, l.imm(basesize)) + self.mc.LGR(r.r3, r.SCRATCH) + self.mc.LGR(r.r2, r.SCRATCH2) + if basesize != 0: + self.mc.AGHI(r.r3, l.imm(basesize)) + if basesize != 0: + self.mc.AGHI(r.r2, l.imm(basesize)) self.mc.push_std_frame() self.mc.load_imm(self.mc.RAW_CALL_REG, self.memcpy_addr) From pypy.commits at gmail.com Tue Jan 5 09:36:51 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 06:36:51 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added test recursive, four tests to fix Message-ID: <568bd503.9a6f1c0a.7b609.41bf@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81588:f59c2e896924 Date: 2016-01-05 15:35 +0100 http://bitbucket.org/pypy/pypy/changeset/f59c2e896924/ Log: added test recursive, four tests to fix added raw memory tests (pass) diff --git a/rpython/jit/backend/zarch/test/test_rawmem.py b/rpython/jit/backend/zarch/test/test_rawmem.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_rawmem.py @@ -0,0 +1,9 @@ + +from rpython.jit.backend.zarch.test.support import JitZARCHMixin +from rpython.jit.metainterp.test.test_rawmem import RawMemTests + + +class TestRawMem(JitZARCHMixin, RawMemTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_rawmem.py + pass diff --git a/rpython/jit/backend/zarch/test/test_recursive.py b/rpython/jit/backend/zarch/test/test_recursive.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_recursive.py @@ -0,0 +1,7 @@ +from rpython.jit.metainterp.test.test_recursive import RecursiveTests +from rpython.jit.backend.zarch.test.support import JitZARCHMixin + +class TestRecursive(JitZARCHMixin, RecursiveTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_recursive.py + pass diff --git a/rpython/jit/backend/zarch/test/test_string.py b/rpython/jit/backend/zarch/test/test_string.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_string.py @@ -0,0 +1,13 @@ +import py +from rpython.jit.metainterp.test import test_string +from rpython.jit.backend.zarch.test.support import JitZARCHMixin + +class TestString(JitZARCHMixin, test_string.TestLLtype): + # for the individual tests see + # ====> ../../../metainterp/test/test_string.py + pass + +class TestUnicode(JitZARCHMixin, test_string.TestLLtypeUnicode): + # for the individual tests see + # ====> ../../../metainterp/test/test_string.py + pass From pypy.commits at gmail.com Tue Jan 5 09:50:28 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 06:50:28 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added tests: loop_unroll, virtualizable, virtualref Message-ID: <568bd834.cdb81c0a.934f5.43ef@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81589:15095a57b881 Date: 2016-01-05 15:49 +0100 http://bitbucket.org/pypy/pypy/changeset/15095a57b881/ Log: added tests: loop_unroll, virtualizable, virtualref diff --git a/rpython/jit/backend/zarch/test/test_loop_unroll.py b/rpython/jit/backend/zarch/test/test_loop_unroll.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_loop_unroll.py @@ -0,0 +1,8 @@ +import py +from rpython.jit.backend.zarch.test.support import JitZARCHMixin +from rpython.jit.metainterp.test import test_loop_unroll + +class TestLoopSpec(JitZARCHMixin, test_loop_unroll.LoopUnrollTest): + # for the individual tests see + # ====> ../../../metainterp/test/test_loop.py + pass diff --git a/rpython/jit/backend/zarch/test/test_virtualizable.py b/rpython/jit/backend/zarch/test/test_virtualizable.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_virtualizable.py @@ -0,0 +1,8 @@ + +import py +from rpython.jit.metainterp.test.test_virtualizable import ImplicitVirtualizableTests +from rpython.jit.backend.zarch.test.support import JitZARCHMixin + +class TestVirtualizable(JitZARCHMixin, ImplicitVirtualizableTests): + def test_blackhole_should_not_reenter(self): + py.test.skip("Assertion error & llinterp mess") diff --git a/rpython/jit/backend/zarch/test/test_virtualref.py b/rpython/jit/backend/zarch/test/test_virtualref.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_virtualref.py @@ -0,0 +1,8 @@ + +from rpython.jit.metainterp.test.test_virtualref import VRefTests +from rpython.jit.backend.zarch.test.support import JitZARCHMixin + +class TestVRef(JitZARCHMixin, VRefTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_virtualref.py + pass From pypy.commits at gmail.com Tue Jan 5 10:04:59 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 05 Jan 2016 07:04:59 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: and yet some more tests: del, dict, exception and fficall Message-ID: <568bdb9b.ca061c0a.cd9b4.4b67@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81590:39602618dc34 Date: 2016-01-05 16:04 +0100 http://bitbucket.org/pypy/pypy/changeset/39602618dc34/ Log: and yet some more tests: del, dict, exception and fficall diff --git a/rpython/jit/backend/zarch/test/test_del.py b/rpython/jit/backend/zarch/test/test_del.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_del.py @@ -0,0 +1,8 @@ + +from rpython.jit.backend.zarch.test.support import JitZARCHMixin +from rpython.jit.metainterp.test.test_del import DelTests + +class TestDel(JitZARCHMixin, DelTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_del.py + pass diff --git a/rpython/jit/backend/zarch/test/test_dict.py b/rpython/jit/backend/zarch/test/test_dict.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_dict.py @@ -0,0 +1,9 @@ + +from rpython.jit.backend.zarch.test.support import JitZARCHMixin +from rpython.jit.metainterp.test.test_dict import DictTests + + +class TestDict(JitZARCHMixin, DictTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_dict.py + pass diff --git a/rpython/jit/backend/zarch/test/test_exception.py b/rpython/jit/backend/zarch/test/test_exception.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_exception.py @@ -0,0 +1,11 @@ + +import py +from rpython.jit.backend.zarch.test.support import JitZARCHMixin +from rpython.jit.metainterp.test.test_exception import ExceptionTests + +class TestExceptions(JitZARCHMixin, ExceptionTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_exception.py + + def test_bridge_from_interpreter_exc(self): + py.test.skip("Widening to trash") diff --git a/rpython/jit/backend/zarch/test/test_fficall.py b/rpython/jit/backend/zarch/test/test_fficall.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_fficall.py @@ -0,0 +1,23 @@ +import py +from rpython.jit.metainterp.test import test_fficall +from rpython.jit.backend.zarch.test.support import JitZARCHMixin + +class TestFfiCall(JitZARCHMixin, test_fficall.FfiCallTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_fficall.py + + def _add_libffi_types_to_ll2types_maybe(self): + # this is needed by test_guard_not_forced_fails, because it produces a + # loop which reads the value of types.* in a variable, then a guard + # fail and we switch to blackhole: the problem is that at this point + # the blackhole interp has a real integer, but it needs to convert it + # back to a lltype pointer (which is handled by ll2ctypes, deeply in + # the logic). The workaround is to teach ll2ctypes in advance which + # are the addresses of the various types.* structures. + # Try to comment this code out and run the test to see how it fails :) + from rpython.rtyper.lltypesystem import rffi, lltype, ll2ctypes + from rpython.rlib.jit_libffi import types + for key, value in types.__dict__.iteritems(): + if isinstance(value, lltype._ptr): + addr = rffi.cast(lltype.Signed, value) + ll2ctypes._int2obj[addr] = value From pypy.commits at gmail.com Tue Jan 5 11:24:22 2016 From: pypy.commits at gmail.com (stefanor) Date: Tue, 05 Jan 2016 08:24:22 -0800 (PST) Subject: [pypy-commit] pypy default: Expose SOABI in sysconfig Message-ID: <568bee36.2457c20a.d9372.fffffdac@mx.google.com> Author: Stefano Rivera Branch: Changeset: r81591:71b4bf53487c Date: 2016-01-05 18:23 +0200 http://bitbucket.org/pypy/pypy/changeset/71b4bf53487c/ Log: Expose SOABI in sysconfig Initially, for Debian, so that dh_pypy can use it to generate useful dependencies. (https://bugs.debian.org/803689) But also so that pip8 can use it, rather than making up its own SOABI. Fixes #2182 diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: From pypy.commits at gmail.com Tue Jan 5 11:40:18 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 05 Jan 2016 08:40:18 -0800 (PST) Subject: [pypy-commit] pypy exctrans: hg merge default Message-ID: <568bf1f2.42cbc20a.18060.ffffabba@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81592:7bf2e87a1dfe Date: 2016-01-05 17:12 +0100 http://bitbucket.org/pypy/pypy/changeset/7bf2e87a1dfe/ Log: hg merge default diff too long, truncating to 2000 out of 6946 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -559,6 +559,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +593,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.0 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.0" -__version_info__ = (1, 4, 0) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -130,8 +130,13 @@ More complete example --------------------- -.. note:: This example depends on pypy_execute_source_ptr which is not available - in PyPy <= 2.2.1. +.. note:: Note that we do not make use of ``extern "Python"``, the new + way to do callbacks in CFFI 1.4: this is because these examples use + the ABI mode, not the API mode, and with the ABI mode you still have + to use ``ffi.callback()``. It is work in progress to integrate + ``extern "Python"`` with the idea of embedding (and it is expected + to ultimately lead to a better way to do embedding than the one + described here, and that would work equally well on CPython and PyPy). Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -83,30 +83,27 @@ **pypy-stm requires 64-bit Linux for now.** -Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu binary here__ -(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04). The current version -supports four "segments", which means that it will run up to four -threads in parallel. (Development recently switched to `stmgc-c8`_, -but that is not ready for trying out yet.) +Development is done in the branch `stmgc-c8`_. If you are only +interested in trying it out, please pester us until we upload a recent +prebuilt binary. The current version supports four "segments", which +means that it will run up to four threads in parallel. To build a version from sources, you first need to compile a custom -version of clang(!); we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 `` -for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for a clang-only feature that hasn't been used so heavily -in the past (without the patches, you get crashes of clang). Then get -the branch `stmgc-c7`_ of PyPy and run:: +version of gcc(!). See the instructions here: +https://bitbucket.org/pypy/stmgc/src/default/gcc-seg-gs/ +(Note that these patches are being incorporated into gcc. It is likely +that future versions of gcc will not need to be patched any more.) + +Then get the branch `stmgc-c8`_ of PyPy and run:: cd pypy/goal ../../rpython/bin/rpython -Ojit --stm - PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py -.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +At the end, this will try to compile the generated C code by calling +``gcc-seg-gs``, which must be the script you installed in the +instructions above. + .. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/ -.. __: https://bitbucket.org/pypy/pypy/downloads/ -.. __: http://clang.llvm.org/get_started.html -.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ .. _caveats: @@ -114,6 +111,12 @@ Current status (stmgc-c7) ------------------------- +.. warning:: + + THIS PAGE IS OLD, THE REST IS ABOUT STMGC-C7 WHEREAS THE CURRENT + DEVELOPMENT WORK IS DONE ON STMGC-C8 + + * **NEW:** It seems to work fine, without crashing any more. Please `report any crash`_ you find (or other bugs). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,8 @@ .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 +Fixed ``_PyLong_FromByteArray()``, which was buggy. + .. branch: numpy-1.10 Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy @@ -44,6 +46,9 @@ .. branch: fix-setslice-can-resize +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + .. branch: anntype2 A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: @@ -73,3 +78,35 @@ Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and turn them into regular RPython functions. Most RPython-compatible `os.*` functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + +.. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + +.. branch: cpyext-slotdefs +.. branch: fix-missing-canraise +.. branch: whatsnew + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -933,6 +933,8 @@ def test_dont_fold_equal_code_objects(self): yield self.st, "f=lambda:1;g=lambda:1.0;x=g()", 'type(x)', float + yield (self.st, "x=(lambda: (-0.0, 0.0), lambda: (0.0, -0.0))[1]()", + 'repr(x)', '(0.0, -0.0)') class AppTestCompiler: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -89,6 +89,7 @@ 'set_code_callback' : 'interp_magic.set_code_callback', 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', + 'decode_long' : 'interp_magic.decode_long', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pycode import CodeHookCache from pypy.interpreter.pyframe import PyFrame @@ -158,4 +158,13 @@ if space.is_none(w_callable): cache._code_hook = None else: - cache._code_hook = w_callable \ No newline at end of file + cache._code_hook = w_callable + + at unwrap_spec(string=str, byteorder=str, signed=int) +def decode_long(space, string, byteorder='little', signed=1): + from rpython.rlib.rbigint import rbigint, InvalidEndiannessError + try: + result = rbigint.frombytes(string, byteorder, bool(signed)) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, "invalid byteorder argument") + return space.newlong_from_rbigint(result) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -30,4 +30,20 @@ """ in d finally: __pypy__.set_code_callback(None) - assert d['f'].__code__ in l \ No newline at end of file + assert d['f'].__code__ in l + + def test_decode_long(self): + from __pypy__ import decode_long + assert decode_long('') == 0 + assert decode_long('\xff\x00') == 255 + assert decode_long('\xff\x7f') == 32767 + assert decode_long('\x00\xff') == -256 + assert decode_long('\x00\x80') == -32768 + assert decode_long('\x80') == -128 + assert decode_long('\x7f') == 127 + assert decode_long('\x55' * 97) == (1 << (97 * 8)) // 3 + assert decode_long('\x00\x80', 'big') == 128 + assert decode_long('\xff\x7f', 'little', False) == 32767 + assert decode_long('\x00\x80', 'little', False) == 32768 + assert decode_long('\x00\x80', 'little', True) == -32768 + raises(ValueError, decode_long, '', 'foo') diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload, clibffi -VERSION = "1.4.0" +VERSION = "1.4.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -40,10 +40,9 @@ at least 8 bytes in size. """ from pypy.module._cffi_backend.ccallback import reveal_callback + from rpython.rlib import rgil - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -71,9 +70,7 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) rffi.stackcounter.stacks_counter -= 1 - before = rffi.aroundstate.before - if before: - before() + rgil.release() def get_ll_cffi_call_python(): diff --git a/pypy/module/_cffi_backend/cglob.py b/pypy/module/_cffi_backend/cglob.py --- a/pypy/module/_cffi_backend/cglob.py +++ b/pypy/module/_cffi_backend/cglob.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend import newtype +from rpython.rlib import rgil from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -26,7 +27,9 @@ if not we_are_translated(): FNPTR = rffi.CCallback([], rffi.VOIDP) fetch_addr = rffi.cast(FNPTR, self.fetch_addr) + rgil.release() result = fetch_addr() + rgil.acquire() else: # careful in translated versions: we need to call fetch_addr, # but in a GIL-releasing way. The easiest is to invoke a diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -423,7 +423,9 @@ exchange_offset += rffi.getintfield(self.atypes[i], 'c_size') # store the exchange data size - cif_descr.exchange_size = exchange_offset + # we also align it to the next multiple of 8, in an attempt to + # work around bugs(?) of libffi (see cffi issue #241) + cif_descr.exchange_size = self.align_arg(exchange_offset) def fb_extra_fields(self, cif_descr): cif_descr.abi = self.fabi diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/_file/test/test_large_file.py b/pypy/module/_file/test/test_large_file.py --- a/pypy/module/_file/test/test_large_file.py +++ b/pypy/module/_file/test/test_large_file.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module._file.test.test_file import getfile @@ -13,6 +13,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): from rpython.translator.c.test.test_extfunc import need_sparse_files + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_large_seek_offsets(self): diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -251,7 +251,7 @@ from pypy.module._socket.interp_socket import addr_as_object if not hasattr(rsocket._c, 'sockaddr_ll'): py.test.skip("posix specific test") - # HACK: To get the correct interface numer of lo, which in most cases is 1, + # HACK: To get the correct interface number of lo, which in most cases is 1, # but can be anything (i.e. 39), we need to call the libc function # if_nametoindex to get the correct index import ctypes @@ -513,7 +513,7 @@ def test_getsetsockopt(self): import _socket as socket import struct - # A socket sould start with reuse == 0 + # A socket should start with reuse == 0 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 @@ -627,6 +627,26 @@ self.foo = _socket.socket() +class AppTestNetlink: + def setup_class(cls): + if not hasattr(os, 'getpid'): + py.test.skip("AF_NETLINK needs os.getpid()") + w_ok = space.appexec([], "(): import _socket; " + + "return hasattr(_socket, 'AF_NETLINK')") + if not space.is_true(w_ok): + py.test.skip("no AF_NETLINK on this platform") + cls.space = space + + def test_connect_to_kernel_netlink_routing_socket(self): + import _socket, os + s = _socket.socket(_socket.AF_NETLINK, _socket.SOCK_DGRAM, _socket.NETLINK_ROUTE) + assert s.getsockname() == (0L, 0L) + s.bind((0, 0)) + a, b = s.getsockname() + assert a == os.getpid() + assert b == 0 + + class AppTestPacket: def setup_class(cls): if not hasattr(os, 'getuid') or os.getuid() != 0: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -124,7 +124,7 @@ METH_COEXIST METH_STATIC METH_CLASS METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) @@ -602,6 +602,7 @@ # Make the wrapper for the cases (1) and (2) def make_wrapper(space, callable, gil=None): "NOT_RPYTHON" + from rpython.rlib import rgil names = callable.api_func.argnames argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes, [name.startswith("w_") for name in names]))) @@ -617,9 +618,7 @@ # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -692,9 +691,7 @@ pypy_debug_catch_fatal_exception() rffi.stackcounter.stacks_counter -= 1 if gil_release: - before = rffi.aroundstate.before - if before: - before() + rgil.release() return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -228,26 +228,11 @@ def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - - result = rbigint() - negative = False - - for i in range(0, n): - if little_endian: - c = intmask(bytes[i]) - else: - c = intmask(bytes[n - i - 1]) - if i == 0 and signed and c & 0x80: - negative = True - if negative: - c = c ^ 0xFF - digit = rbigint.fromint(c) - - result = result.lshift(8) - result = result.add(digit) - - if negative: - result = result.neg() - + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, bytes), + rffi.cast(lltype.Signed, n)) + if little_endian: + byteorder = 'little' + else: + byteorder = 'big' + result = rbigint.frombytes(s, byteorder, signed != 0) return space.newlong_from_rbigint(result) - diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,14 +4,14 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc) -from pypy.module.cpyext.pyobject import from_ref +from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt @@ -65,22 +65,24 @@ func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_inquirypred(space, w_self, w_args, func): @@ -378,6 +380,17 @@ space.call_function(delattr_fn, w_self, w_name) return 0 api_func = slot_tp_setattro.api_func + elif name == 'tp_getattro': + getattr_fn = w_type.getdictvalue(space, '__getattribute__') + if getattr_fn is None: + return + + @cpython_api([PyObject, PyObject], PyObject, + external=True) + @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) + def slot_tp_getattro(space, w_self, w_name): + return space.call_function(getattr_fn, w_self, w_name) + api_func = slot_tp_getattro.api_func else: return diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -175,10 +175,26 @@ little_endian, is_signed); """), ]) - assert module.from_bytearray(True, False) == 0x9ABC - assert module.from_bytearray(True, True) == -0x6543 - assert module.from_bytearray(False, False) == 0xBC9A - assert module.from_bytearray(False, True) == -0x4365 + assert module.from_bytearray(True, False) == 0xBC9A + assert module.from_bytearray(True, True) == -0x4366 + assert module.from_bytearray(False, False) == 0x9ABC + assert module.from_bytearray(False, True) == -0x6544 + + def test_frombytearray_2(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x41BC9A + assert module.from_bytearray(True, True) == 0x41BC9A + assert module.from_bytearray(False, False) == 0x9ABC41 + assert module.from_bytearray(False, True) == -0x6543BF def test_fromunicode(self): module = self.import_extension('foo', [ diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -385,12 +385,64 @@ PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); return NULL; } + if (!args->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + if (args->ob_type->tp_getattro == + args->ob_type->tp_base->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "recursive tp_getattro"); + return NULL; + } Py_RETURN_TRUE; ''' ) ]) assert module.test_type(type(None)) + def test_tp_getattro(self): + module = self.import_extension('foo', [ + ("test_tp_getattro", "METH_VARARGS", + ''' + PyObject *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *value = PyTuple_GET_ITEM(args, 1); + if (!obj->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + PyObject *name = PyString_FromString("attr1"); + PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + if (attr->ob_ival != value->ob_ival) + { + PyErr_SetString(PyExc_ValueError, + "tp_getattro returned wrong value"); + return NULL; + } + Py_DECREF(name); + Py_DECREF(attr); + name = PyString_FromString("attr2"); + attr = obj->ob_type->tp_getattro(obj, name); + if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) + { + PyErr_Clear(); + } else { + PyErr_SetString(PyExc_ValueError, + "tp_getattro should have raised"); + return NULL; + } + Py_DECREF(name); + Py_RETURN_TRUE; + ''' + ) + ]) + class C: + def __init__(self): + self.attr1 = 123 + assert module.test_tp_getattro(C(), 123) + def test_nb_int(self): module = self.import_extension('foo', [ ("nb_int", "METH_O", @@ -591,45 +643,92 @@ def test_binaryfunc(self): module = self.import_extension('foo', [ - ("new_obj", "METH_NOARGS", + ("newInt", "METH_VARARGS", """ - FooObject *fooObj; + IntLikeObject *intObj; + long intval; - Foo_Type.tp_as_number = &foo_as_number; - foo_as_number.nb_add = foo_nb_add_call; - if (PyType_Ready(&Foo_Type) < 0) return NULL; - fooObj = PyObject_New(FooObject, &Foo_Type); - if (!fooObj) { + if (!PyArg_ParseTuple(args, "l", &intval)) + return NULL; + + IntLike_Type.tp_as_number = &intlike_as_number; + IntLike_Type.tp_flags |= Py_TPFLAGS_CHECKTYPES; + intlike_as_number.nb_add = intlike_nb_add; + if (PyType_Ready(&IntLike_Type) < 0) return NULL; + intObj = PyObject_New(IntLikeObject, &IntLike_Type); + if (!intObj) { return NULL; } - return (PyObject *)fooObj; + intObj->ival = intval; + return (PyObject *)intObj; + """), + ("newIntNoOp", "METH_VARARGS", + """ + IntLikeObjectNoOp *intObjNoOp; + long intval; + + if (!PyArg_ParseTuple(args, "l", &intval)) + return NULL; + + IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; + if (PyType_Ready(&IntLike_Type_NoOp) < 0) return NULL; + intObjNoOp = PyObject_New(IntLikeObjectNoOp, &IntLike_Type_NoOp); + if (!intObjNoOp) { + return NULL; + } + + intObjNoOp->ival = intval; + return (PyObject *)intObjNoOp; """)], """ typedef struct { PyObject_HEAD - } FooObject; + long ival; + } IntLikeObject; static PyObject * - foo_nb_add_call(PyObject *self, PyObject *other) + intlike_nb_add(PyObject *self, PyObject *other) { - return PyInt_FromLong(42); + long val1 = ((IntLikeObject *)(self))->ival; + if (PyInt_Check(other)) { + long val2 = PyInt_AsLong(other); + return PyInt_FromLong(val1+val2); + } + + long val2 = ((IntLikeObject *)(other))->ival; + return PyInt_FromLong(val1+val2); } - PyTypeObject Foo_Type = { + PyTypeObject IntLike_Type = { PyObject_HEAD_INIT(0) /*ob_size*/ 0, - /*tp_name*/ "Foo", - /*tp_basicsize*/ sizeof(FooObject), + /*tp_name*/ "IntLike", + /*tp_basicsize*/ sizeof(IntLikeObject), }; - static PyNumberMethods foo_as_number; + static PyNumberMethods intlike_as_number; + + typedef struct + { + PyObject_HEAD + long ival; + } IntLikeObjectNoOp; + + PyTypeObject IntLike_Type_NoOp = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "IntLikeNoOp", + /*tp_basicsize*/ sizeof(IntLikeObjectNoOp), + }; """) - a = module.new_obj() - b = module.new_obj() + a = module.newInt(1) + b = module.newInt(2) c = 3 - assert (a + b) == 42 - raises(TypeError, "b + c") + d = module.newIntNoOp(4) + assert (a + b) == 3 + assert (b + c) == 5 + assert (d + a) == 5 def test_tp_new_in_subclass_of_type(self): skip("BROKEN") diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -582,6 +582,8 @@ pto.c_tp_free = base.c_tp_free if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro + if not pto.c_tp_getattro: + pto.c_tp_getattro = base.c_tp_getattro finally: Py_DecRef(space, base_pyo) @@ -651,6 +653,12 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) + if not pto.c_tp_getattro: + from pypy.module.cpyext.object import PyObject_GenericGetAttr + pto.c_tp_getattro = llhelper( + PyObject_GenericGetAttr.api_func.functype, + PyObject_GenericGetAttr.api_func.get_wrapper(space)) + if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -1,6 +1,6 @@ from __future__ import with_statement from rpython.tool.udir import udir -import os +import os, sys, py class AppTestMMap: spaceconfig = dict(usemodules=('mmap',)) @@ -8,6 +8,15 @@ def setup_class(cls): cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + def setup_method(self, meth): + if getattr(meth, 'is_large', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") + def test_page_size(self): import mmap assert mmap.PAGESIZE > 0 @@ -648,6 +657,7 @@ assert m[0xFFFFFFF] == b'A' finally: m.close() + test_large_offset.is_large = True def test_large_filesize(self): import mmap @@ -665,6 +675,7 @@ assert m.size() == 0x180000000 finally: m.close() + test_large_filesize.is_large = True def test_all(self): # this is a global test, ported from test_mmap.py diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -299,7 +299,7 @@ return build_stat_result(space, st) def lstat(space, w_path): - "Like stat(path), but do no follow symbolic links." + "Like stat(path), but do not follow symbolic links." try: st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -93,6 +93,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_posix_is_pypy_s(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -45,7 +45,7 @@ # the newstr and the strsetitems are because the string is forced, # which is in turn because the optimizer doesn't know how to handle a - # getarrayitem_gc_i on a virtual string. It could be improved, but it + # gc_load_indexed_i on a virtual string. It could be improved, but it # is also true that in real life cases struct.unpack is called on # strings which come from the outside, so it's a minor issue. assert loop.match_by_id("unpack", """ @@ -55,17 +55,17 @@ strsetitem(p88, 1, i14) strsetitem(p88, 2, i17) strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + i91 = gc_load_indexed_i(p88, 0, 1, _, -4) """) def test_struct_object(self): def main(n): import struct - s = struct.Struct("i") + s = struct.Struct("ii") i = 1 while i < n: - buf = s.pack(i) # ID: pack - x = s.unpack(buf)[0] # ID: unpack + buf = s.pack(-1, i) # ID: pack + x = s.unpack(buf)[1] # ID: unpack i += x / i return i @@ -88,10 +88,15 @@ assert loop.match_by_id('unpack', """ # struct.unpack - p88 = newstr(4) - strsetitem(p88, 0, i11) - strsetitem(p88, 1, i14) - strsetitem(p88, 2, i17) - strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + p88 = newstr(8) + strsetitem(p88, 0, 255) + strsetitem(p88, 1, 255) + strsetitem(p88, 2, 255) + strsetitem(p88, 3, 255) + strsetitem(p88, 4, i11) + strsetitem(p88, 5, i14) + strsetitem(p88, 6, i17) + strsetitem(p88, 7, i20) + i90 = gc_load_indexed_i(p88, 0, 1, _, -4) + i91 = gc_load_indexed_i(p88, 4, 1, _, -4) """) diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -48,3 +48,6 @@ use_bytecode_counter=False) space.actionflag.__class__ = interp_signal.SignalActionFlag # xxx yes I know the previous line is a hack + + def startup(self, space): + space.check_signal_action.startup(space) diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -63,19 +63,25 @@ AsyncAction.__init__(self, space) self.pending_signal = -1 self.fire_in_another_thread = False - if self.space.config.objspace.usemodules.thread: - from pypy.module.thread import gil - gil.after_thread_switch = self._after_thread_switch + # + @rgc.no_collect + def _after_thread_switch(): + if self.fire_in_another_thread: + if self.space.threadlocals.signals_enabled(): + self.fire_in_another_thread = False + self.space.actionflag.rearm_ticker() + # this occurs when we just switched to the main thread + # and there is a signal pending: we force the ticker to + # -1, which should ensure perform() is called quickly. + self._after_thread_switch = _after_thread_switch + # ^^^ so that 'self._after_thread_switch' can be annotated as a + # constant - @rgc.no_collect - def _after_thread_switch(self): - if self.fire_in_another_thread: - if self.space.threadlocals.signals_enabled(): - self.fire_in_another_thread = False - self.space.actionflag.rearm_ticker() - # this occurs when we just switched to the main thread - # and there is a signal pending: we force the ticker to - # -1, which should ensure perform() is called quickly. + def startup(self, space): + # this is translated + if space.config.objspace.usemodules.thread: + from rpython.rlib import rgil + rgil.invoke_after_thread_switch(self._after_thread_switch) def perform(self, executioncontext, frame): self._poll_for_signals() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 4): - py.test.skip("re-enable me in version 1.4") + if __version_info__ < (1, 5): + py.test.skip("re-enable me in version 1.5") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -27,7 +27,7 @@ from pypy.module.thread import gil MixedModule.__init__(self, space, *args) prev_ec = space.threadlocals.get_ec() - space.threadlocals = gil.GILThreadLocals() + space.threadlocals = gil.GILThreadLocals(space) space.threadlocals.initialize(space) if prev_ec is not None: space.threadlocals._set_ec(prev_ec) diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -11,7 +11,6 @@ from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import PeriodicAsyncAction from pypy.module.thread.threadlocals import OSThreadLocals -from rpython.rlib.objectmodel import invoke_around_extcall class GILThreadLocals(OSThreadLocals): """A version of OSThreadLocals that enforces a GIL.""" @@ -23,34 +22,21 @@ space.actionflag.register_periodic_action(GILReleaseAction(space), use_bytecode_counter=True) - def _initialize_gil(self, space): - rgil.gil_allocate() - def setup_threads(self, space): """Enable threads in the object space, if they haven't already been.""" if not self.gil_ready: - self._initialize_gil(space) + # Note: this is a quasi-immutable read by module/pypyjit/interp_jit + # It must be changed (to True) only if it was really False before + rgil.allocate() self.gil_ready = True result = True else: result = False # already set up - - # add the GIL-releasing callback around external function calls. - # - # XXX we assume a single space, but this is not quite true during - # testing; for example, if you run the whole of test_lock you get - # a deadlock caused by the first test's space being reused by - # test_lock_again after the global state was cleared by - # test_compile_lock. As a workaround, we repatch these global - # fields systematically. - invoke_around_extcall(before_external_call, after_external_call) return result - def reinit_threads(self, space): - "Called in the child process after a fork()" - OSThreadLocals.reinit_threads(self, space) - if self.gil_ready: # re-initialize the gil if needed - self._initialize_gil(space) + ## def reinit_threads(self, space): + ## "Called in the child process after a fork()" + ## OSThreadLocals.reinit_threads(self, space) class GILReleaseAction(PeriodicAsyncAction): @@ -59,43 +45,4 @@ """ def perform(self, executioncontext, frame): - do_yield_thread() - - -after_thread_switch = lambda: None # hook for signal.py - -def before_external_call(): - # this function must not raise, in such a way that the exception - # transformer knows that it cannot raise! - rgil.gil_release() -before_external_call._gctransformer_hint_cannot_collect_ = True -before_external_call._dont_reach_me_in_del_ = True - -def after_external_call(): - rgil.gil_acquire() - rthread.gc_thread_run() - after_thread_switch() -after_external_call._gctransformer_hint_cannot_collect_ = True -after_external_call._dont_reach_me_in_del_ = True - -# The _gctransformer_hint_cannot_collect_ hack is needed for -# translations in which the *_external_call() functions are not inlined. -# They tell the gctransformer not to save and restore the local GC -# pointers in the shadow stack. This is necessary because the GIL is -# not held after the call to before_external_call() or before the call -# to after_external_call(). - -def do_yield_thread(): - # explicitly release the gil, in a way that tries to give more - # priority to other threads (as opposed to continuing to run in - # the same thread). - if rgil.gil_yield_thread(): - rthread.gc_thread_run() - after_thread_switch() -do_yield_thread._gctransformer_hint_close_stack_ = True -do_yield_thread._dont_reach_me_in_del_ = True -do_yield_thread._dont_inline_ = True - -# do_yield_thread() needs a different hint: _gctransformer_hint_close_stack_. -# The *_external_call() functions are themselves called only from the rffi -# module from a helper function that also has this hint. + rgil.yield_thread() diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -5,7 +5,7 @@ import errno from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module.thread import gil +from rpython.rlib import rgil NORMAL_TIMEOUT = 300.0 # 5 minutes @@ -15,9 +15,9 @@ adaptivedelay = 0.04 limit = time.time() + delay * NORMAL_TIMEOUT while time.time() <= limit: - gil.before_external_call() + rgil.release() time.sleep(adaptivedelay) - gil.after_external_call() + rgil.acquire() gc.collect() if space.is_true(space.call_function(w_condition)): return diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -1,5 +1,6 @@ import time from pypy.module.thread import gil +from rpython.rlib import rgil from rpython.rlib.test import test_rthread from rpython.rlib import rthread as thread from rpython.rlib.objectmodel import we_are_translated @@ -55,7 +56,7 @@ assert state.datalen3 == len(state.data) assert state.datalen4 == len(state.data) debug_print(main, i, state.datalen4) - gil.do_yield_thread() + rgil.yield_thread() assert i == j j += 1 def bootstrap(): @@ -64,7 +65,7 @@ except Exception, e: assert 0 thread.gc_thread_die() - my_gil_threadlocals = gil.GILThreadLocals() + my_gil_threadlocals = gil.GILThreadLocals(space) def f(): state.data = [] state.datalen1 = 0 @@ -82,9 +83,9 @@ if not still_waiting: raise ValueError("time out") still_waiting -= 1 - if not we_are_translated(): gil.before_external_call() + if not we_are_translated(): rgil.release() time.sleep(0.01) - if not we_are_translated(): gil.after_external_call() + if not we_are_translated(): rgil.acquire() debug_print("leaving!") i1 = i2 = 0 for tid, i in state.data: diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,5 +1,7 @@ -from rpython.rlib import rthread +import weakref +from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,15 +15,51 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - def __init__(self): + def __init__(self, space): "NOT_RPYTHON" - self._valuedict = {} # {thread_ident: ExecutionContext()} + # + # This object tracks code that enters and leaves threads. + # There are two APIs. For Python-level threads, we know when + # the thread starts and ends, and we call enter_thread() and + # leave_thread(). In a few other cases, like callbacks, we + # might be running in some never-seen-before thread: in this + # case, the callback logic needs to call try_enter_thread() at + # the start, and if this returns True it needs to call + # leave_thread() at the end. + # + # We implement an optimization for the second case (which only + # works if we translate with a framework GC and with + # rweakref). If try_enter_thread() is called in a + # never-seen-before thread, it still returns False and + # remembers the ExecutionContext with 'self._weaklist'. The + # next time we call try_enter_thread() again in the same + # thread, the ExecutionContext is reused. The optimization is + # not completely invisible to the user: 'thread._local()' + # values will remain. We can argue that it is the correct + # behavior to do that, and the behavior we get if the + # optimization is disabled is buggy (but hard to do better + # then). + # + # 'self._valuedict' is a dict mapping the thread idents to + # ExecutionContexts; it does not list the ExecutionContexts + # which are in 'self._weaklist'. (The latter is more precisely + # a list of AutoFreeECWrapper objects, defined below, which + # each references the ExecutionContext.) + # + self.space = space + self._valuedict = {} self._cleanup_() self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, loop_invariant=True) + def can_optimize_with_weaklist(self): + config = self.space.config + return (config.translation.rweakref and + rthread.ThreadLocalReference.automatic_keepalive(config)) + def _cleanup_(self): self._valuedict.clear() + self._weaklist = None self._mainthreadident = 0 def enter_thread(self, space): @@ -29,19 +67,35 @@ self._set_ec(space.createexecutioncontext()) def try_enter_thread(self, space): - if rthread.get_ident() in self._valuedict: + # common case: the thread-local has already got a value + if self.raw_thread_local.get() is not None: return False - self.enter_thread(space) - return True - def _set_ec(self, ec): + # Else, make and attach a new ExecutionContext + ec = space.createexecutioncontext() + if not self.can_optimize_with_weaklist(): + self._set_ec(ec) + return True + + # If can_optimize_with_weaklist(), then 'rthread' keeps the + # thread-local values alive until the end of the thread. Use + # AutoFreeECWrapper as an object with a __del__; when this + # __del__ is called, it means the thread was really finished. + # In this case we don't want leave_thread() to be called + # explicitly, so we return False. + if self._weaklist is None: + self._weaklist = ListECWrappers() + self._weaklist.append(weakref.ref(AutoFreeECWrapper(ec))) + self._set_ec(ec, register_in_valuedict=False) + return False + + def _set_ec(self, ec, register_in_valuedict=True): ident = rthread.get_ident() if self._mainthreadident == 0 or self._mainthreadident == ident: ec._signals_enabled = 1 # the main thread is enabled self._mainthreadident = ident - self._valuedict[ident] = ec - # This logic relies on hacks and _make_sure_does_not_move(). - # It only works because we keep the 'ec' alive in '_valuedict' too. + if register_in_valuedict: + self._valuedict[ident] = ec self.raw_thread_local.set(ec) def leave_thread(self, space): @@ -84,7 +138,23 @@ ec._signals_enabled = new def getallvalues(self): - return self._valuedict + if self._weaklist is None: + return self._valuedict + # This logic walks the 'self._weaklist' list and adds the + # ExecutionContexts to 'result'. We are careful in case there + # are two AutoFreeECWrappers in the list which have the same + # 'ident'; in this case we must keep the most recent one (the + # older one should be deleted soon). Moreover, entries in + # self._valuedict have priority because they are never + # outdated. + result = {} + for h in self._weaklist.items(): + wrapper = h() + if wrapper is not None and not wrapper.deleted: + result[wrapper.ident] = wrapper.ec + # ^^ this possibly overwrites an older ec + result.update(self._valuedict) + return result def reinit_threads(self, space): "Called in the child process after a fork()" @@ -94,7 +164,31 @@ old_sig = ec._signals_enabled if ident != self._mainthreadident: old_sig += 1 - self._cleanup_() + self._cleanup_() # clears self._valuedict self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig + + +class AutoFreeECWrapper(object): + deleted = False + + def __init__(self, ec): + # this makes a loop between 'self' and 'ec'. It should not prevent + # the __del__ method here from being called. + self.ec = ec + ec._threadlocals_auto_free = self + self.ident = rthread.get_ident() + + def __del__(self): + from pypy.module.thread.os_local import thread_is_stopping + # this is always called in another thread: the thread + # referenced by 'self.ec' has finished at that point, and + # we're just after the GC which finds no more references to + # 'ec' (and thus to 'self'). + self.deleted = True + thread_is_stopping(self.ec) + +class ListECWrappers(rshrinklist.AbstractShrinkList): + def must_keep(self, wref): + return wref() is not None diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -521,7 +521,6 @@ def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - # XXX consider to extend rlist's functionality? length = self.length() start, stop, step, slicelength = w_index.indices4(space, length) assert slicelength >= 0 diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -358,3 +358,10 @@ assert 3L.__coerce__(4L) == (3L, 4L) assert 3L.__coerce__(4) == (3, 4) assert 3L.__coerce__(object()) == NotImplemented + + def test_linear_long_base_16(self): + # never finishes if long(_, 16) is not linear-time + size = 100000 + n = "a" * size + expected = (2 << (size * 4)) // 3 + assert long(n, 16) == expected diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -100,6 +100,7 @@ self.argtypes = argtypes def __call__(self, funcdesc, inputcells): + from rpython.rlib.objectmodel import NOT_CONSTANT from rpython.rtyper.lltypesystem import lltype args_s = [] from rpython.annotator import model as annmodel @@ -115,6 +116,9 @@ args_s.append(s_input) elif argtype is None: args_s.append(inputcells[i]) # no change + elif argtype is NOT_CONSTANT: + from rpython.annotator.model import not_const + args_s.append(not_const(inputcells[i])) else: args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper)) if len(inputcells) != len(args_s): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3516,6 +3516,32 @@ s = a.build_types(f, [unicode]) assert isinstance(s, annmodel.SomeUnicodeString) + def test_extended_slice(self): + a = self.RPythonAnnotator() + def f(start, end, step): + return [1, 2, 3][start:end:step] + with py.test.raises(AnnotatorError): + a.build_types(f, [int, int, int]) + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True)]) + def f(x): + return x[::-1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[::2] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[1:2:1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) def test_negative_slice(self): def f(s, e): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -441,7 +441,7 @@ def dict_contains(s_dct, s_element, position): s_dct.dictdef.generalize_key(s_element) if s_dct._is_empty(position): - s_bool =SomeBool() + s_bool = SomeBool() s_bool.const = False return s_bool return s_Bool diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -1,5 +1,5 @@ """ -This module defines all the SpaceOeprations used in rpython.flowspace. +This module defines all the SpaceOperations used in rpython.flowspace. """ import __builtin__ @@ -196,21 +196,6 @@ return cls._dispatch(type(s_arg)) @classmethod - def get_specialization(cls, s_arg, *_ignored): - try: - impl = getattr(s_arg, cls.opname) - - def specialized(annotator, arg, *other_args): - return impl(*[annotator.annotation(x) for x in other_args]) - try: - specialized.can_only_throw = impl.can_only_throw - except AttributeError: - pass - return specialized - except AttributeError: - return cls._dispatch(type(s_arg)) - - @classmethod def register_transform(cls, Some_cls): def decorator(func): cls._transform[Some_cls] = func @@ -523,6 +508,14 @@ *[annotator.annotation(arg) for arg in self.args]) +class NewSlice(HLOperation): + opname = 'newslice' + canraise = [] + + def consider(self, annotator): + raise AnnotatorError("Cannot use extended slicing in rpython") + + class Pow(PureOperation): opname = 'pow' arity = 3 diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -19,7 +19,6 @@ from rpython.jit.backend.arm.locations import imm, RawSPStackLocation from rpython.jit.backend.llsupport import symbolic from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt, @@ -655,31 +654,24 @@ pmc.B_offs(offset, c.EQ) return fcond - def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs, size = arglocs - scale = get_scale(size.value) - self._write_to_mem(value_loc, base_loc, - ofs, imm(scale), fcond) + def emit_op_gc_store(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, ofs_loc, size_loc = arglocs + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, ofs_loc, imm(scale), fcond) return fcond - emit_op_setfield_raw = emit_op_setfield_gc - emit_op_zero_ptr_field = emit_op_setfield_gc - - def _genop_getfield(self, op, arglocs, regalloc, fcond): - base_loc, ofs, res, size = arglocs - signed = op.getdescr().is_field_signed() - scale = get_scale(size.value) - self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) + def _emit_op_gc_load(self, op, arglocs, regalloc, fcond): + base_loc, ofs_loc, res_loc, nsize_loc = arglocs + nsize = nsize_loc.value + signed = (nsize < 0) + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, ofs_loc, imm(scale), + signed, fcond) return fcond - emit_op_getfield_gc_i = _genop_getfield - emit_op_getfield_gc_r = _genop_getfield - emit_op_getfield_gc_f = _genop_getfield - emit_op_getfield_gc_pure_i = _genop_getfield - emit_op_getfield_gc_pure_r = _genop_getfield - emit_op_getfield_gc_pure_f = _genop_getfield - emit_op_getfield_raw_i = _genop_getfield - emit_op_getfield_raw_f = _genop_getfield + emit_op_gc_load_i = _emit_op_gc_load + emit_op_gc_load_r = _emit_op_gc_load + emit_op_gc_load_f = _emit_op_gc_load def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): base_loc, value_loc = arglocs @@ -688,68 +680,21 @@ self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond - def _genop_getinteriorfield(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, res_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - descr = op.getdescr() - assert isinstance(descr, InteriorFieldDescr) - signed = descr.fielddescr.is_field_signed() - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - ofs_loc = tmploc - self._load_from_mem(res_loc, base_loc, ofs_loc, - imm(scale), signed, fcond) - return fcond - - emit_op_getinteriorfield_gc_i = _genop_getinteriorfield - emit_op_getinteriorfield_gc_r = _genop_getinteriorfield - emit_op_getinteriorfield_gc_f = _genop_getinteriorfield - - def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, value_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond) - return fcond - emit_op_setinteriorfield_raw = emit_op_setinteriorfield_gc - - def emit_op_arraylen_gc(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs = arglocs - self.load_reg(self.mc, res, base_loc, ofs.value) - return fcond - - def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - + def emit_op_gc_store_indexed(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, index_loc, imm(scale), fcond) return fcond def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL): + # Write a value of size '1 << scale' at the address + # 'base_ofs + ofs_loc'. Note that 'scale' is not used to scale + # the offset! if scale.value == 3: assert value_loc.is_vfp_reg() # vstr only supports imm offsets @@ -789,43 +734,31 @@ else: assert 0 - emit_op_setarrayitem_raw = emit_op_setarrayitem_gc - - def emit_op_raw_store(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + def _emit_op_gc_load_indexed(self, op, arglocs, regalloc, fcond): + res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() + nsize = nsize_loc.value + signed = (nsize < 0) + # add the base offset + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + # + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, index_loc, imm(scale), + signed, fcond) return fcond - def _genop_getarrayitem(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - signed = op.getdescr().is_item_signed() - - # scale the offset as required - # XXX we should try to encode the scale inside the "shift" part of LDR - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_getarrayitem_gc_i = _genop_getarrayitem - emit_op_getarrayitem_gc_r = _genop_getarrayitem - emit_op_getarrayitem_gc_f = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_i = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_r = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_f = _genop_getarrayitem - emit_op_getarrayitem_raw_i = _genop_getarrayitem - emit_op_getarrayitem_raw_f = _genop_getarrayitem + emit_op_gc_load_indexed_i = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_r = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_f = _emit_op_gc_load_indexed def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): + # Load a value of '1 << scale' bytes, from the memory location + # 'base_loc + ofs_loc'. Note that 'scale' is not used to scale + # the offset! + # if scale.value == 3: assert res_loc.is_vfp_reg() # vldr only supports imm offsets @@ -881,51 +814,6 @@ else: assert 0 - def _genop_raw_load(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - # no base offset - assert ofs.value == 0 - signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_raw_load_i = _genop_raw_load - emit_op_raw_load_f = _genop_raw_load - - def emit_op_strlen(self, op, arglocs, regalloc, fcond): - l0, l1, res = arglocs - if l1.is_imm(): - self.mc.LDR_ri(res.value, l0.value, l1.getint(), cond=fcond) - else: - self.mc.LDR_rr(res.value, l0.value, l1.value, cond=fcond) - return fcond - - def emit_op_strgetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.LDRB_ri(res.value, r.ip.value, basesize.value, cond=fcond) - return fcond - - def emit_op_strsetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - return fcond - #from ../x86/regalloc.py:928 ff. def emit_op_copystrcontent(self, op, arglocs, regalloc, fcond): assert len(arglocs) == 0 @@ -1016,35 +904,6 @@ else: raise AssertionError("bad unicode item size") - emit_op_unicodelen = emit_op_strlen - - def emit_op_unicodegetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.LDR_ri(res.value, r.ip.value, basesize.value, cond=fcond) - elif scale.value == 1: - self.mc.LDRH_ri(res.value, r.ip.value, basesize.value, cond=fcond) - else: - assert 0, itemsize.value - return fcond - - def emit_op_unicodesetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.STR_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - elif scale.value == 1: - self.mc.STRH_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - else: - assert 0, itemsize.value - - return fcond - def store_force_descr(self, op, fail_locs, frame_depth): pos = self.mc.currpos() guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -34,9 +34,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.rlib.rarithmetic import r_uint from rpython.jit.backend.llsupport.descr import CallDescr @@ -802,15 +799,12 @@ src_locations2, dst_locations2, vfptmploc) return [] - def prepare_op_setfield_gc(self, op, fcond): + def prepare_op_gc_store(self, op, fcond): boxes = op.getarglist() - ofs, size, sign = unpack_fielddescr(op.getdescr()) - return self._prepare_op_setfield(boxes, ofs, size) - - def _prepare_op_setfield(self, boxes, ofs, size): - a0, a1 = boxes - base_loc = self.make_sure_var_in_reg(a0, boxes) - value_loc = self.make_sure_var_in_reg(a1, boxes) + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + ofs = boxes[1].getint() + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + size = boxes[3].getint() ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -819,19 +813,13 @@ self.assembler.load(ofs_loc, imm(ofs)) return [value_loc, base_loc, ofs_loc, imm(size)] - prepare_op_setfield_raw = prepare_op_setfield_gc - - def prepare_op_zero_ptr_field(self, op, fcond): + def _prepare_op_gc_load(self, op, fcond): a0 = op.getarg(0) ofs = op.getarg(1).getint() - return self._prepare_op_setfield([a0, ConstInt(0)], ofs, WORD) - - def _prepare_op_getfield(self, op, fcond): - a0 = op.getarg(0) - ofs, size, sign = unpack_fielddescr(op.getdescr()) + nsize = op.getarg(2).getint() # negative for "signed" base_loc = self.make_sure_var_in_reg(a0) immofs = imm(ofs) - ofs_size = default_imm_size if size < 8 else VMEM_imm_size + ofs_size = default_imm_size if abs(nsize) < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: @@ -839,17 +827,12 @@ self.assembler.load(ofs_loc, immofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size)] + res_loc = self.force_allocate_reg(op) + return [base_loc, ofs_loc, res_loc, imm(nsize)] - prepare_op_getfield_gc_i = _prepare_op_getfield - prepare_op_getfield_gc_r = _prepare_op_getfield - prepare_op_getfield_gc_f = _prepare_op_getfield - prepare_op_getfield_raw_i = _prepare_op_getfield - prepare_op_getfield_raw_f = _prepare_op_getfield - prepare_op_getfield_gc_pure_i = _prepare_op_getfield - prepare_op_getfield_gc_pure_r = _prepare_op_getfield - prepare_op_getfield_gc_pure_f = _prepare_op_getfield + prepare_op_gc_load_i = _prepare_op_gc_load + prepare_op_gc_load_r = _prepare_op_gc_load + prepare_op_gc_load_f = _prepare_op_gc_load def prepare_op_increment_debug_counter(self, op, fcond): boxes = op.getarglist() @@ -859,188 +842,38 @@ self.free_temp_vars() return [base_loc, value_loc] - def _prepare_op_getinteriorfield(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) + def prepare_op_gc_store_indexed(self, op, fcond): + boxes = op.getarglist() + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[3].getint() == 1 # scale + ofs = boxes[4].getint() + size = boxes[5].getint() + assert check_imm_arg(ofs) + return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] + + def _prepare_op_gc_load_indexed(self, op, fcond): + boxes = op.getarglist() + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[2].getint() == 1 # scale + ofs = boxes[3].getint() + nsize = boxes[4].getint() + assert check_imm_arg(ofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] + res_loc = self.force_allocate_reg(op) + return [res_loc, base_loc, index_loc, imm(nsize), imm(ofs)] - prepare_op_getinteriorfield_gc_i = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_r = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_f = _prepare_op_getinteriorfield - - def prepare_op_setinteriorfield_gc(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - value_loc = self.make_sure_var_in_reg(op.getarg(2), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) - return [base_loc, index_loc, value_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] - prepare_op_setinteriorfield_raw = prepare_op_setinteriorfield_gc - - def prepare_op_arraylen_gc(self, op, fcond): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - arg = op.getarg(0) - base_loc = self.make_sure_var_in_reg(arg) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_op_setarrayitem_gc(self, op, fcond): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(args[0], args) - value_loc = self.make_sure_var_in_reg(args[2], args) - ofs_loc = self.make_sure_var_in_reg(args[1], args) - assert check_imm_arg(ofs) - return [value_loc, base_loc, ofs_loc, imm(scale), imm(ofs)] - prepare_op_setarrayitem_raw = prepare_op_setarrayitem_gc - prepare_op_raw_store = prepare_op_setarrayitem_gc - - def _prepare_op_getarrayitem(self, op, fcond): - boxes = op.getarglist() - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - assert check_imm_arg(ofs) - return [res, base_loc, ofs_loc, imm(scale), imm(ofs)] - - prepare_op_getarrayitem_gc_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_f = _prepare_op_getarrayitem From pypy.commits at gmail.com Tue Jan 5 11:40:20 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 05 Jan 2016 08:40:20 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Call db.prepare_inline_helpers() from a slightly more logical location Message-ID: <568bf1f4.8a5a1c0a.fb76a.7035@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81593:405bc736b37f Date: 2016-01-05 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/405bc736b37f/ Log: Call db.prepare_inline_helpers() from a slightly more logical location diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -213,6 +213,7 @@ if db is None: db = self.build_database() + db.prepare_inline_helpers() pf = self.getentrypointptr() if self.modulename is None: self.modulename = uniquemodulename('testing') @@ -848,7 +849,6 @@ # sg = SourceGenerator(database) sg.set_strategy(targetdir, split) - database.prepare_inline_helpers() sg.gen_readable_parts_of_source(f) headers_to_precompile = sg.headers_to_precompile[:] headers_to_precompile.insert(0, incfilename) From pypy.commits at gmail.com Tue Jan 5 12:41:29 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 05 Jan 2016 09:41:29 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Call funcgen.patch_graph() before source generation rather than in the middle of it Message-ID: <568c0049.42b81c0a.cf3bf.ffff8e52@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81594:b72c57375aa0 Date: 2016-01-05 18:40 +0100 http://bitbucket.org/pypy/pypy/changeset/b72c57375aa0/ Log: Call funcgen.patch_graph() before source generation rather than in the middle of it diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -40,7 +40,7 @@ t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs_for_llinterp() + db = cbuild.generate_graphs() entrypointptr = cbuild.getentrypointptr() entrygraph = entrypointptr._obj.graph @@ -69,7 +69,7 @@ return -x t = rtype(g, [int]) gg = graphof(t, g) - assert not CollectAnalyzer(t).analyze_direct_call(gg) + assert not CollectAnalyzer(t).analyze_direct_call(gg) def test_cancollect_external(): fext1 = rffi.llexternal('fext1', [], lltype.Void, releasegil=False) @@ -110,12 +110,12 @@ def entrypoint(argv): return g() + 2 - + t = rtype(entrypoint, [s_list_of_strings]) t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs_for_llinterp() + db = cbuild.generate_graphs() def test_no_collect_detection(): from rpython.rlib import rgc @@ -134,12 +134,13 @@ def entrypoint(argv): return g() + 2 - + t = rtype(entrypoint, [s_list_of_strings]) t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - f = py.test.raises(Exception, cbuild.generate_graphs_for_llinterp) + with py.test.raises(Exception) as f: + cbuild.generate_graphs() expected = "'no_collect' function can trigger collection: Author: Armin Rigo Branch: extradoc Changeset: r690:39785461a869 Date: 2016-01-05 19:13 +0100 http://bitbucket.org/pypy/pypy.org/changeset/39785461a869/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $62641 of $105000 (59.7%) + $62669 of $105000 (59.7%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Tue Jan 5 16:19:19 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 05 Jan 2016 13:19:19 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: add protective hack to work around PyString_Type missing a ob_type reference, give up writing a test to properly fix it Message-ID: <568c3357.41dfc20a.752aa.ffff9631@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81595:d32637ebd6d9 Date: 2016-01-05 01:11 +0200 http://bitbucket.org/pypy/pypy/changeset/d32637ebd6d9/ Log: add protective hack to work around PyString_Type missing a ob_type reference, give up writing a test to properly fix it diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -514,7 +514,7 @@ pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out pto.c_tp_itemsize = 0 # uninitialized fields: - # c_tp_print, c_tp_getattr, c_tp_setattr + # c_tp_print # XXX implement # c_tp_compare and the following fields (see http://docs.python.org/c-api/typeobj.html ) w_base = best_base(space, w_type.bases_w) @@ -605,9 +605,14 @@ finish_type_1(space, py_type) - w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type)) + if py_type.c_ob_type: + w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type)) + else: + # Somehow the tp_base type is created with no ob_type, notably + # PyString_Type and PyBaseString_Type + # While this is a hack, cpython does it as well. + w_metatype = space.w_type - assert w_metatype # XXX in numpy initmultiarray, py_type.c_ob_type is 0 w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype) track_reference(space, py_obj, w_obj) w_obj.__init__(space, py_type) From pypy.commits at gmail.com Tue Jan 5 16:19:21 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 05 Jan 2016 13:19:21 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: inherit tp_as... slots from base, need to test? Message-ID: <568c3359.cdb81c0a.934f5.ffffd3c5@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81596:26ff9bd62015 Date: 2016-01-05 23:15 +0200 http://bitbucket.org/pypy/pypy/changeset/26ff9bd62015/ Log: inherit tp_as... slots from base, need to test? diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -592,7 +592,6 @@ Creates an interpreter type from a PyTypeObject structure. """ # missing: - # inheriting tp_as_* slots # unsupported: # tp_mro, tp_subclasses py_type = rffi.cast(PyTypeObjectPtr, py_obj) @@ -619,6 +618,13 @@ w_obj.ready() finish_type_2(space, py_type, w_obj) + # inheriting tp_as_* slots + base = py_type.c_tp_base + if base: + if not py_type.c_tp_as_number: py_type.c_tp_as_number = base.c_tp_as_number + if not py_type.c_tp_as_sequence: py_type.c_tp_as_sequence = base.c_tp_as_sequence + if not py_type.c_tp_as_mapping: py_type.c_tp_as_mapping = base.c_tp_as_mapping + if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer state = space.fromcache(RefcountState) state.non_heaptypes_w.append(w_obj) From pypy.commits at gmail.com Wed Jan 6 05:18:18 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 02:18:18 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: in-progress Message-ID: <568ce9ea.44e21c0a.897ff.ffff8ac9@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5591:d5d6783367e1 Date: 2016-01-06 11:18 +0100 http://bitbucket.org/pypy/extradoc/changeset/d5d6783367e1/ Log: in-progress diff --git a/blog/draft/cffi-embedding.rst b/blog/draft/cffi-embedding.rst --- a/blog/draft/cffi-embedding.rst +++ b/blog/draft/cffi-embedding.rst @@ -6,44 +6,55 @@ Python programs, in a way that is both simple and that works across CPython 2.x and 3.x and PyPy. -We are now adding support for *embedding* Python inside non-Python -programs. This is traditionally done using the CPython C API: from C -code, you call ``Py_Initialize()`` and then some other functions like +The major news of CFFI 1.4, released last december, was that you can +now declare C functions with ``extern "Python"``, in the ``cdef()``. +These magic keywords make the function callable from C (where it is +defined automatically), but calling it will call some Python code +(which you attach with the ``@ffi.def_extern()`` decorator). This is +useful because it gives a more straightforward, faster and +libffi-independent way to write callbacks. For more details, see `the +documentation`_. + +You are, in effect, declaring a static family of C functions which +call Python code. The idea is to take pointers to them, and pass them +around to other C functions, as callbacks. However, the idea of a set +of C functions which call Python code opens another path: *embedding* +Python code inside non-Python programs. + +Embedding is traditionally done using the CPython C API: from C code, +you call ``Py_Initialize()`` and then some other functions like ``PyRun_SimpleString()``. In the simple cases it is, indeed, simple -enough; but it can become a more complicated story if you throw in -supporting application-dependent object types, and correctly running -on multiple threads, and so on. +enough; but it can become a complicated story if you throw in +supporting application-dependent object types; and a messy story if +you add correctly running on multiple threads, for example. -Moreover, this approach is specific to CPython (2.x or 3.x, which you -can do in a similar way). It does not work on PyPy, which has its own -smaller but very different `embedding API`_. +Moreover, this approach is specific to CPython (2.x or 3.x). It does +not work at all on PyPy, which has its own very different, minimal +`embedding API`_. -The new-and-coming thing about CFFI, meant as replacement of the above -solutions, is direct embedding support---and it does that with no -fixed API at all. The idea is to write some Python script with a -``cdef()`` which declares a number of ``extern "Python"`` functions. -When running the script, it creates the C source code and compiles it -to a dynamically-linked library (``.so`` on Linux). This is the same -as in the regular API-mode usage, and ``extern "Python"`` was -`introduced in CFFI 1.4`_. What is new is that these ``extern +The new-and-coming thing about CFFI 1.5, meant as replacement of the +above solutions, is direct embedding support---with no fixed API at +all. The idea is to write some Python script with a ``cdef()`` which +declares a number of ``extern "Python"`` functions. When running the +script, it creates the C source code and compiles it to a +dynamically-linked library (``.so`` on Linux). This is the same as in +the regular API-mode usage. What is new is that these ``extern "Python"`` can now also be *exported* from the ``.so``, in the C sense. You also give a bit of initialization-time Python code -directly in the script, which will be compiled into the ``.so`` -too. +directly in the script, which will be compiled into the ``.so`` too. -In other words, this library can now be used directly from any C -program (and it is still importable in Python). It exposes the C API -of your choice, which you specified with the ``extern "Python"`` -declarations. You can use it to make whatever custom API makes sense -in your particular case. You can even directly make a "plug-in" for -any program that supports them, just by exporting the API expected for -such plugins. +This library can now be used directly from any C program (and it is +still importable in Python). It exposes the C API of your choice, +which you specified with the ``extern "Python"`` declarations. You +can use it to make whatever custom API makes sense in your particular +case. You can even directly make a "plug-in" for any program that +supports them, just by exporting the API expected for such plugins. -This is still being finalized, but please try it out. (You can also see -`embedding.py`_ directly online for a quick glance.) These are the -instructions on Linux with CPython 2.7 (CPython 3.x and non-Linux -platforms are still a work in progress right now, but this should be -quickly fixed): +This is still being finalized, but please try it out. (You can also +see `embedding.py`_ directly online for a quick glance.) Here are +below the instructions on Linux with CPython 2.7 (CPython 3.x and +non-Linux platforms are still a work in progress right now, but this +should be quickly fixed): * get the branch ``static-callback-embedding`` of CFFI:: @@ -59,7 +70,7 @@ cd demo PYTHONPATH=.. python embedding.py -* run ``gcc`` to build the C sources---on Linux:: +* this produces ``_embedding_cffi.c``; run ``gcc`` to build it---on Linux:: gcc -shared -fPIC _embedding_cffi.c -o _embedding_cffi.so -lpython2.7 -I/usr/include/python2.7 @@ -75,22 +86,39 @@ Very similar steps can be followed on PyPy, but it requires the ``cffi-static-callback-embedding`` branch of PyPy, which you must -first translate from sources. +first translate from sources. The difference is only that you need to +adapt the first ``gcc`` command line: replace ``-lpython2.7`` with +``-lpypy-c`` and to fix the ``-I`` path (and possibly add a ``-L`` +path). Note that CPython/PyPy is automatically initialized (using locks in case of multi-threading) the first time any of the ``extern "Python"`` -functions is called from the C program. At that time, the custom -initialization-time Python code you put in +functions is called from the C program. (This should work even if two +different threads call the first time a function from two *different* +embedded CFFI extensions; in other words, explicit initialization is +never needed). The custom initialization-time Python code you put in ``ffi.embedding_init_code()`` is executed. If this code starts to be -big, you may consider moving it to independent modules or packages; -then the initialization-time Python code only needs to import them -(possibly after hacking around with ``sys.path``). +big, you can move it to independent modules or packages. Then the +initialization-time Python code only needs to import them. In that +case, you have to carefully set up ``sys.path`` if the modules are not +installed in the usual Python way. + +A better alternative would be to use virtualenv. How to do that is +not fully fleshed out so far. You can certainly run the whole program +with the environment variables set up by the virtualenv's ``activate`` +script first. There are probably other solutions that involve using +gcc's ``-Wl,-rpath=\$ORIGIN/`` or ``-Wl,-rpath=/fixed/path/`` options +to load a specific libpython or libypypy-c library. If you try it out +and it doesn't work the way you would like, please complain :-) Another point: right now this does not support CPython's notion of multiple subinterpreters. The logic creates a single global Python -interpreter, and runs everything in that context. Idea about how to -support that cleanly would be welcome ``:-)`` More generally, any -feedback is appreciated. +interpreter, and runs everything in that context. Maybe a future +version would have an explicit API to do that---or maybe it should be +the job of a 3rd-party extension module to provide a Python interface +over the notion of subinterpreters... + +More generally, any feedback is appreciated. Have fun, From pypy.commits at gmail.com Wed Jan 6 05:49:40 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 02:49:40 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: updates Message-ID: <568cf144.552f1c0a.96293.ffff98ca@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5592:ba9cf2218c65 Date: 2016-01-06 11:49 +0100 http://bitbucket.org/pypy/extradoc/changeset/ba9cf2218c65/ Log: updates diff --git a/blog/draft/cffi-embedding.rst b/blog/draft/cffi-embedding.rst --- a/blog/draft/cffi-embedding.rst +++ b/blog/draft/cffi-embedding.rst @@ -6,6 +6,10 @@ Python programs, in a way that is both simple and that works across CPython 2.x and 3.x and PyPy. +This post assumes that you know what CFFI is and how to use it in +API mode (``ffi.cdef()``, ``ffi.set_source()``, ``ffi.compile()``). +A quick overview can be found here__. + The major news of CFFI 1.4, released last december, was that you can now declare C functions with ``extern "Python"``, in the ``cdef()``. These magic keywords make the function callable from C (where it is @@ -51,7 +55,7 @@ supports them, just by exporting the API expected for such plugins. This is still being finalized, but please try it out. (You can also -see `embedding.py`_ directly online for a quick glance.) Here are +see `embedding.py`_ directly online for a quick glance.) See below the instructions on Linux with CPython 2.7 (CPython 3.x and non-Linux platforms are still a work in progress right now, but this should be quickly fixed): @@ -86,30 +90,31 @@ Very similar steps can be followed on PyPy, but it requires the ``cffi-static-callback-embedding`` branch of PyPy, which you must -first translate from sources. The difference is only that you need to +first translate from sources. The difference is then that you need to adapt the first ``gcc`` command line: replace ``-lpython2.7`` with ``-lpypy-c`` and to fix the ``-I`` path (and possibly add a ``-L`` path). -Note that CPython/PyPy is automatically initialized (using locks in -case of multi-threading) the first time any of the ``extern "Python"`` +Note that CPython/PyPy is automatically initialized (using locks in case +of multi-threading) the first time any of the ``extern "Python"`` functions is called from the C program. (This should work even if two different threads call the first time a function from two *different* embedded CFFI extensions; in other words, explicit initialization is never needed). The custom initialization-time Python code you put in -``ffi.embedding_init_code()`` is executed. If this code starts to be -big, you can move it to independent modules or packages. Then the -initialization-time Python code only needs to import them. In that -case, you have to carefully set up ``sys.path`` if the modules are not -installed in the usual Python way. +``ffi.embedding_init_code()`` is executed at that time. If this code +starts to be big, you can move it to independent modules or packages. +Then the initialization-time Python code only needs to import them. In +that case, you have to carefully set up ``sys.path`` if the modules are +not installed in the usual Python way. -A better alternative would be to use virtualenv. How to do that is -not fully fleshed out so far. You can certainly run the whole program -with the environment variables set up by the virtualenv's ``activate`` -script first. There are probably other solutions that involve using -gcc's ``-Wl,-rpath=\$ORIGIN/`` or ``-Wl,-rpath=/fixed/path/`` options -to load a specific libpython or libypypy-c library. If you try it out -and it doesn't work the way you would like, please complain :-) +If the Python code is big and full of dependencies, a better alternative +would be to use virtualenv. How to do that is not fully fleshed out so +far. You can certainly run the whole program with the environment +variables set up by the virtualenv's ``activate`` script first. There +are probably other solutions that involve using gcc's +``-Wl,-rpath=\$ORIGIN/`` or ``-Wl,-rpath=/fixed/path/`` options to load +a specific libpython or libypypy-c library. If you try it out and it +doesn't work the way you would like, please complain ``:-)`` Another point: right now this does not support CPython's notion of multiple subinterpreters. The logic creates a single global Python From pypy.commits at gmail.com Wed Jan 6 06:28:12 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 03:28:12 -0800 (PST) Subject: [pypy-commit] cffi default: emphasis Message-ID: <568cfa4c.aa5dc20a.74c0.fffff8a4@mx.google.com> Author: Armin Rigo Branch: Changeset: r2528:24e89b804296 Date: 2016-01-06 12:28 +0100 http://bitbucket.org/cffi/cffi/changeset/24e89b804296/ Log: emphasis diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -476,7 +476,7 @@ ``@ffi.def_extern()``. The ``@ffi.def_extern()`` decorator should be applied to a global -function, once. This is because each function from the cdef with +function, but *only once.* This is because each function from the cdef with ``extern "Python"`` turns into only one C function. To support some corner cases, it is possible to redefine the attached Python function by calling ``@ffi.def_extern()`` again---but this is not recommended! From pypy.commits at gmail.com Wed Jan 6 06:34:09 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 03:34:09 -0800 (PST) Subject: [pypy-commit] cffi default: Add vararg demo Message-ID: <568cfbb1.0c2e1c0a.999da.ffffa263@mx.google.com> Author: Armin Rigo Branch: Changeset: r2529:e3e506a393a6 Date: 2016-01-06 12:33 +0100 http://bitbucket.org/cffi/cffi/changeset/e3e506a393a6/ Log: Add vararg demo diff --git a/demo/extern_python_varargs.py b/demo/extern_python_varargs.py new file mode 100644 --- /dev/null +++ b/demo/extern_python_varargs.py @@ -0,0 +1,61 @@ +import cffi + +ffi = cffi.FFI() + +ffi.cdef(""" + int my_algo(int); + typedef ... va_list; + extern "Python" int f(int, va_list *); + + int fetch_int(va_list *); + double fetch_double(va_list *); + void *fetch_ptr(va_list *); +""") + +ffi.set_source("_extern_python_cffi", """ + #include + + static int f(int, va_list *); + + static int f1(int n, ...) + { + va_list ap; + va_start(ap, n); + int res = f(n, &ap); + va_end(ap); + return res; + } + + static int fetch_int(va_list *va) { return va_arg((*va), int); } + static double fetch_double(va_list *va) { return va_arg((*va), double); } + static void * fetch_ptr(va_list *va) { return va_arg((*va), void *); } + + static int my_algo(int n) { + return f1(3, n, n+1, n+2) + f1(1, &n) + f1(2, 12.3, 45.6); + } +""") + +ffi.compile() + + +from _extern_python_cffi import ffi, lib + + at ffi.def_extern() +def f(n, va): + if n == 3: + x = lib.fetch_int(va) + y = lib.fetch_int(va) + z = lib.fetch_int(va) + print (x, y, z) + elif n == 1: + ptr = lib.fetch_ptr(va) + print 'ptr to:', ffi.cast("int *", ptr)[0] + elif n == 2: + x = lib.fetch_double(va) + y = lib.fetch_double(va) + print (x, y) + else: + raise AssertionError(n) + return 14 + +print lib.my_algo(10) diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -616,7 +616,10 @@ } The ``extern "Python"`` functions cannot be variadic for now. This -may be implemented in the future. +may be implemented in the future. (`This demo`__ shows how to do it +anyway, but it is a bit lengthy.) + +.. __: https://bitbucket.org/cffi/cffi/src/default/demo/extern_python_varargs.py Each corresponding Python callback function is defined with the ``@ffi.def_extern()`` decorator. Be careful when writing this From pypy.commits at gmail.com Wed Jan 6 07:21:40 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 04:21:40 -0800 (PST) Subject: [pypy-commit] pypy default: Crash with the same UnpicklingError as CPython when asked to unpickle Message-ID: <568d06d4.d4811c0a.20530.ffffbcdc@mx.google.com> Author: Armin Rigo Branch: Changeset: r81597:5460d8ed7191 Date: 2016-01-06 13:20 +0100 http://bitbucket.org/pypy/pypy/changeset/5460d8ed7191/ Log: Crash with the same UnpicklingError as CPython when asked to unpickle a string with an invalid load key diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, '%s'." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': diff --git a/pypy/module/test_lib_pypy/test_cPickle.py b/pypy/module/test_lib_pypy/test_cPickle.py --- a/pypy/module/test_lib_pypy/test_cPickle.py +++ b/pypy/module/test_lib_pypy/test_cPickle.py @@ -5,3 +5,7 @@ def test_stack_underflow(): py.test.raises(cPickle.UnpicklingError, cPickle.loads, "a string") + +def test_bad_key(): + e = py.test.raises(cPickle.UnpicklingError, cPickle.loads, "v") + assert str(e.value) == "invalid load key, 'v'." From pypy.commits at gmail.com Wed Jan 6 07:24:31 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 04:24:31 -0800 (PST) Subject: [pypy-commit] pypy default: Better use %r than '%s' in case it's a non-printable byte Message-ID: <568d077f.4f911c0a.323ea.ffffbc6b@mx.google.com> Author: Armin Rigo Branch: Changeset: r81598:c245f85e49a9 Date: 2016-01-06 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/c245f85e49a9/ Log: Better use %r than '%s' in case it's a non-printable byte diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -170,7 +170,7 @@ try: meth = self.dispatch[key] except KeyError: - raise UnpicklingError("invalid load key, '%s'." % chr(key)) + raise UnpicklingError("invalid load key, %r." % chr(key)) meth(self) key = ord(self.read(1)) except TypeError: From pypy.commits at gmail.com Wed Jan 6 07:29:40 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 06 Jan 2016 04:29:40 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged default Message-ID: <568d08b4.6351c20a.2321.58dd@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81599:b6d3c78012f2 Date: 2016-01-06 13:28 +0100 http://bitbucket.org/pypy/pypy/changeset/b6d3c78012f2/ Log: merged default added stubs for malloc nursery set the jf_descr and gcmap too early (in generate quick_failure), that is problematic, cause it lets guard_not_forced fail when an exception is raised diff too long, truncating to 2000 out of 2992 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -559,6 +559,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +593,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,8 @@ .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 +Fixed ``_PyLong_FromByteArray()``, which was buggy. + .. branch: numpy-1.10 Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy @@ -101,3 +103,10 @@ Fix the cryptic exception message when attempting to use extended slicing in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -89,6 +89,7 @@ 'set_code_callback' : 'interp_magic.set_code_callback', 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', + 'decode_long' : 'interp_magic.decode_long', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pycode import CodeHookCache from pypy.interpreter.pyframe import PyFrame @@ -158,4 +158,13 @@ if space.is_none(w_callable): cache._code_hook = None else: - cache._code_hook = w_callable \ No newline at end of file + cache._code_hook = w_callable + + at unwrap_spec(string=str, byteorder=str, signed=int) +def decode_long(space, string, byteorder='little', signed=1): + from rpython.rlib.rbigint import rbigint, InvalidEndiannessError + try: + result = rbigint.frombytes(string, byteorder, bool(signed)) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, "invalid byteorder argument") + return space.newlong_from_rbigint(result) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -30,4 +30,20 @@ """ in d finally: __pypy__.set_code_callback(None) - assert d['f'].__code__ in l \ No newline at end of file + assert d['f'].__code__ in l + + def test_decode_long(self): + from __pypy__ import decode_long + assert decode_long('') == 0 + assert decode_long('\xff\x00') == 255 + assert decode_long('\xff\x7f') == 32767 + assert decode_long('\x00\xff') == -256 + assert decode_long('\x00\x80') == -32768 + assert decode_long('\x80') == -128 + assert decode_long('\x7f') == 127 + assert decode_long('\x55' * 97) == (1 << (97 * 8)) // 3 + assert decode_long('\x00\x80', 'big') == 128 + assert decode_long('\xff\x7f', 'little', False) == 32767 + assert decode_long('\x00\x80', 'little', False) == 32768 + assert decode_long('\x00\x80', 'little', True) == -32768 + raises(ValueError, decode_long, '', 'foo') diff --git a/pypy/module/_cffi_backend/cglob.py b/pypy/module/_cffi_backend/cglob.py --- a/pypy/module/_cffi_backend/cglob.py +++ b/pypy/module/_cffi_backend/cglob.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend import newtype +from rpython.rlib import rgil from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -26,7 +27,9 @@ if not we_are_translated(): FNPTR = rffi.CCallback([], rffi.VOIDP) fetch_addr = rffi.cast(FNPTR, self.fetch_addr) + rgil.release() result = fetch_addr() + rgil.acquire() else: # careful in translated versions: we need to call fetch_addr, # but in a GIL-releasing way. The easiest is to invoke a diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -423,7 +423,9 @@ exchange_offset += rffi.getintfield(self.atypes[i], 'c_size') # store the exchange data size - cif_descr.exchange_size = exchange_offset + # we also align it to the next multiple of 8, in an attempt to + # work around bugs(?) of libffi (see cffi issue #241) + cif_descr.exchange_size = self.align_arg(exchange_offset) def fb_extra_fields(self, cif_descr): cif_descr.abi = self.fabi diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -228,26 +228,11 @@ def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - - result = rbigint() - negative = False - - for i in range(0, n): - if little_endian: - c = intmask(bytes[i]) - else: - c = intmask(bytes[n - i - 1]) - if i == 0 and signed and c & 0x80: - negative = True - if negative: - c = c ^ 0xFF - digit = rbigint.fromint(c) - - result = result.lshift(8) - result = result.add(digit) - - if negative: - result = result.neg() - + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, bytes), + rffi.cast(lltype.Signed, n)) + if little_endian: + byteorder = 'little' + else: + byteorder = 'big' + result = rbigint.frombytes(s, byteorder, signed != 0) return space.newlong_from_rbigint(result) - diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,8 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - CANNOT_FAIL) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -387,7 +386,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - error=CANNOT_FAIL, external=True) + external=True) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -175,10 +175,26 @@ little_endian, is_signed); """), ]) - assert module.from_bytearray(True, False) == 0x9ABC - assert module.from_bytearray(True, True) == -0x6543 - assert module.from_bytearray(False, False) == 0xBC9A - assert module.from_bytearray(False, True) == -0x4365 + assert module.from_bytearray(True, False) == 0xBC9A + assert module.from_bytearray(True, True) == -0x4366 + assert module.from_bytearray(False, False) == 0x9ABC + assert module.from_bytearray(False, True) == -0x6544 + + def test_frombytearray_2(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x41BC9A + assert module.from_bytearray(True, True) == 0x41BC9A + assert module.from_bytearray(False, False) == 0x9ABC41 + assert module.from_bytearray(False, True) == -0x6543BF def test_fromunicode(self): module = self.import_extension('foo', [ diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -414,15 +414,26 @@ return NULL; } PyObject *name = PyString_FromString("attr1"); - PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); - if (attr1->ob_ival != value->ob_ival) + PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, "tp_getattro returned wrong value"); return NULL; } Py_DECREF(name); - Py_DECREF(attr1); + Py_DECREF(attr); + name = PyString_FromString("attr2"); + attr = obj->ob_type->tp_getattro(obj, name); + if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) + { + PyErr_Clear(); + } else { + PyErr_SetString(PyExc_ValueError, + "tp_getattro should have raised"); + return NULL; + } + Py_DECREF(name); Py_RETURN_TRUE; ''' ) @@ -637,7 +648,7 @@ IntLikeObject *intObj; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; @@ -657,7 +668,7 @@ IntLikeObjectNoOp *intObjNoOp; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -299,7 +299,7 @@ return build_stat_result(space, st) def lstat(space, w_path): - "Like stat(path), but do no follow symbolic links." + "Like stat(path), but do not follow symbolic links." try: st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -45,7 +45,7 @@ # the newstr and the strsetitems are because the string is forced, # which is in turn because the optimizer doesn't know how to handle a - # getarrayitem_gc_i on a virtual string. It could be improved, but it + # gc_load_indexed_i on a virtual string. It could be improved, but it # is also true that in real life cases struct.unpack is called on # strings which come from the outside, so it's a minor issue. assert loop.match_by_id("unpack", """ @@ -55,17 +55,17 @@ strsetitem(p88, 1, i14) strsetitem(p88, 2, i17) strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + i91 = gc_load_indexed_i(p88, 0, 1, _, -4) """) def test_struct_object(self): def main(n): import struct - s = struct.Struct("i") + s = struct.Struct("ii") i = 1 while i < n: - buf = s.pack(i) # ID: pack - x = s.unpack(buf)[0] # ID: unpack + buf = s.pack(-1, i) # ID: pack + x = s.unpack(buf)[1] # ID: unpack i += x / i return i @@ -88,10 +88,15 @@ assert loop.match_by_id('unpack', """ # struct.unpack - p88 = newstr(4) - strsetitem(p88, 0, i11) - strsetitem(p88, 1, i14) - strsetitem(p88, 2, i17) - strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + p88 = newstr(8) + strsetitem(p88, 0, 255) + strsetitem(p88, 1, 255) + strsetitem(p88, 2, 255) + strsetitem(p88, 3, 255) + strsetitem(p88, 4, i11) + strsetitem(p88, 5, i14) + strsetitem(p88, 6, i17) + strsetitem(p88, 7, i20) + i90 = gc_load_indexed_i(p88, 0, 1, _, -4) + i91 = gc_load_indexed_i(p88, 4, 1, _, -4) """) diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -27,7 +27,7 @@ from pypy.module.thread import gil MixedModule.__init__(self, space, *args) prev_ec = space.threadlocals.get_ec() - space.threadlocals = gil.GILThreadLocals() + space.threadlocals = gil.GILThreadLocals(space) space.threadlocals.initialize(space) if prev_ec is not None: space.threadlocals._set_ec(prev_ec) diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -65,7 +65,7 @@ except Exception, e: assert 0 thread.gc_thread_die() - my_gil_threadlocals = gil.GILThreadLocals() + my_gil_threadlocals = gil.GILThreadLocals(space) def f(): state.data = [] state.datalen1 = 0 diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,5 +1,7 @@ -from rpython.rlib import rthread +import weakref +from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,15 +15,51 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - def __init__(self): + def __init__(self, space): "NOT_RPYTHON" - self._valuedict = {} # {thread_ident: ExecutionContext()} + # + # This object tracks code that enters and leaves threads. + # There are two APIs. For Python-level threads, we know when + # the thread starts and ends, and we call enter_thread() and + # leave_thread(). In a few other cases, like callbacks, we + # might be running in some never-seen-before thread: in this + # case, the callback logic needs to call try_enter_thread() at + # the start, and if this returns True it needs to call + # leave_thread() at the end. + # + # We implement an optimization for the second case (which only + # works if we translate with a framework GC and with + # rweakref). If try_enter_thread() is called in a + # never-seen-before thread, it still returns False and + # remembers the ExecutionContext with 'self._weaklist'. The + # next time we call try_enter_thread() again in the same + # thread, the ExecutionContext is reused. The optimization is + # not completely invisible to the user: 'thread._local()' + # values will remain. We can argue that it is the correct + # behavior to do that, and the behavior we get if the + # optimization is disabled is buggy (but hard to do better + # then). + # + # 'self._valuedict' is a dict mapping the thread idents to + # ExecutionContexts; it does not list the ExecutionContexts + # which are in 'self._weaklist'. (The latter is more precisely + # a list of AutoFreeECWrapper objects, defined below, which + # each references the ExecutionContext.) + # + self.space = space + self._valuedict = {} self._cleanup_() self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, loop_invariant=True) + def can_optimize_with_weaklist(self): + config = self.space.config + return (config.translation.rweakref and + rthread.ThreadLocalReference.automatic_keepalive(config)) + def _cleanup_(self): self._valuedict.clear() + self._weaklist = None self._mainthreadident = 0 def enter_thread(self, space): @@ -29,19 +67,35 @@ self._set_ec(space.createexecutioncontext()) def try_enter_thread(self, space): - if rthread.get_ident() in self._valuedict: + # common case: the thread-local has already got a value + if self.raw_thread_local.get() is not None: return False - self.enter_thread(space) - return True - def _set_ec(self, ec): + # Else, make and attach a new ExecutionContext + ec = space.createexecutioncontext() + if not self.can_optimize_with_weaklist(): + self._set_ec(ec) + return True + + # If can_optimize_with_weaklist(), then 'rthread' keeps the + # thread-local values alive until the end of the thread. Use + # AutoFreeECWrapper as an object with a __del__; when this + # __del__ is called, it means the thread was really finished. + # In this case we don't want leave_thread() to be called + # explicitly, so we return False. + if self._weaklist is None: + self._weaklist = ListECWrappers() + self._weaklist.append(weakref.ref(AutoFreeECWrapper(ec))) + self._set_ec(ec, register_in_valuedict=False) + return False + + def _set_ec(self, ec, register_in_valuedict=True): ident = rthread.get_ident() if self._mainthreadident == 0 or self._mainthreadident == ident: ec._signals_enabled = 1 # the main thread is enabled self._mainthreadident = ident - self._valuedict[ident] = ec - # This logic relies on hacks and _make_sure_does_not_move(). - # It only works because we keep the 'ec' alive in '_valuedict' too. + if register_in_valuedict: + self._valuedict[ident] = ec self.raw_thread_local.set(ec) def leave_thread(self, space): @@ -84,7 +138,23 @@ ec._signals_enabled = new def getallvalues(self): - return self._valuedict + if self._weaklist is None: + return self._valuedict + # This logic walks the 'self._weaklist' list and adds the + # ExecutionContexts to 'result'. We are careful in case there + # are two AutoFreeECWrappers in the list which have the same + # 'ident'; in this case we must keep the most recent one (the + # older one should be deleted soon). Moreover, entries in + # self._valuedict have priority because they are never + # outdated. + result = {} + for h in self._weaklist.items(): + wrapper = h() + if wrapper is not None and not wrapper.deleted: + result[wrapper.ident] = wrapper.ec + # ^^ this possibly overwrites an older ec + result.update(self._valuedict) + return result def reinit_threads(self, space): "Called in the child process after a fork()" @@ -94,7 +164,31 @@ old_sig = ec._signals_enabled if ident != self._mainthreadident: old_sig += 1 - self._cleanup_() + self._cleanup_() # clears self._valuedict self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig + + +class AutoFreeECWrapper(object): + deleted = False + + def __init__(self, ec): + # this makes a loop between 'self' and 'ec'. It should not prevent + # the __del__ method here from being called. + self.ec = ec + ec._threadlocals_auto_free = self + self.ident = rthread.get_ident() + + def __del__(self): + from pypy.module.thread.os_local import thread_is_stopping + # this is always called in another thread: the thread + # referenced by 'self.ec' has finished at that point, and + # we're just after the GC which finds no more references to + # 'ec' (and thus to 'self'). + self.deleted = True + thread_is_stopping(self.ec) + +class ListECWrappers(rshrinklist.AbstractShrinkList): + def must_keep(self, wref): + return wref() is not None diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -358,3 +358,10 @@ assert 3L.__coerce__(4L) == (3L, 4L) assert 3L.__coerce__(4) == (3, 4) assert 3L.__coerce__(object()) == NotImplemented + + def test_linear_long_base_16(self): + # never finishes if long(_, 16) is not linear-time + size = 100000 + n = "a" * size + expected = (2 << (size * 4)) // 3 + assert long(n, 16) == expected diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -100,6 +100,7 @@ self.argtypes = argtypes def __call__(self, funcdesc, inputcells): + from rpython.rlib.objectmodel import NOT_CONSTANT from rpython.rtyper.lltypesystem import lltype args_s = [] from rpython.annotator import model as annmodel @@ -115,6 +116,9 @@ args_s.append(s_input) elif argtype is None: args_s.append(inputcells[i]) # no change + elif argtype is NOT_CONSTANT: + from rpython.annotator.model import not_const + args_s.append(not_const(inputcells[i])) else: args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper)) if len(inputcells) != len(args_s): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper import rclass from rpython.rlib.clibffi import FFI_DEFAULT_ABI @@ -638,18 +639,9 @@ return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): + a = support.cast_arg(lltype.Ptr(descr.A), a) + array = a._obj assert index >= 0 - if descr.A is descr.OUTERA: - a = support.cast_arg(lltype.Ptr(descr.A), a) - else: - # we use rffi.cast instead of support.cast_arg because the types - # might not be "compatible" enough from the lltype point of - # view. In particular, this happens when we use - # str_storage_getitem, in which an rpy_string is casted to - # rpy_string_as_Signed (or similar) - a = rffi.cast(lltype.Ptr(descr.OUTERA), a) - a = getattr(a, descr.OUTERA._arrayfld) - array = a._obj return support.cast_result(descr.A.OF, array.getitem(index)) bh_getarrayitem_gc_pure_i = bh_getarrayitem_gc @@ -714,6 +706,24 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def bh_gc_load_indexed_i(self, struct, index, scale, base_ofs, bytes): + if bytes == 1: T = rffi.UCHAR + elif bytes == 2: T = rffi.USHORT + elif bytes == 4: T = rffi.UINT + elif bytes == 8: T = rffi.ULONGLONG + elif bytes == -1: T = rffi.SIGNEDCHAR + elif bytes == -2: T = rffi.SHORT + elif bytes == -4: T = rffi.INT + elif bytes == -8: T = rffi.LONGLONG + else: raise NotImplementedError(bytes) + x = llop.gc_load_indexed(T, struct, index, scale, base_ofs) + return lltype.cast_primitive(lltype.Signed, x) + + def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): + if bytes != 8: + raise Exception("gc_load_indexed_f is only for 'double'!") + return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale, base_ofs) + def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) p[0] += 1 diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -725,6 +725,16 @@ def bh_raw_load_f(self, addr, offset, descr): return self.read_float_at_mem(addr, offset) + def bh_gc_load_indexed_i(self, addr, index, scale, base_ofs, bytes): + offset = base_ofs + scale * index + return self.read_int_at_mem(addr, offset, abs(bytes), bytes < 0) + + def bh_gc_load_indexed_f(self, addr, index, scale, base_ofs, bytes): + # only for 'double'! + assert bytes == rffi.sizeof(lltype.Float) + offset = base_ofs + scale * index + return self.read_float_at_mem(addr, offset) + def bh_new(self, sizedescr): return self.gc_ll_descr.gc_malloc(sizedescr) diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -20,7 +20,7 @@ PPCBuilder, PPCGuardToken) from rpython.jit.backend.ppc.regalloc import TempPtr, TempInt from rpython.jit.backend.llsupport import symbolic, jitframe -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr, CallDescr +from rpython.jit.backend.llsupport.descr import CallDescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -706,8 +706,10 @@ _mixin_ = True - def _write_to_mem(self, value_loc, base_loc, ofs, size): - if size.value == 8: + def _write_to_mem(self, value_loc, base_loc, ofs, size_loc): + assert size_loc.is_imm() + size = size_loc.value + if size == 8: if value_loc.is_fp_reg(): if ofs.is_imm(): self.mc.stfd(value_loc.value, base_loc.value, ofs.value) @@ -718,17 +720,17 @@ self.mc.std(value_loc.value, base_loc.value, ofs.value) else: self.mc.stdx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if ofs.is_imm(): self.mc.stw(value_loc.value, base_loc.value, ofs.value) else: self.mc.stwx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if ofs.is_imm(): self.mc.sth(value_loc.value, base_loc.value, ofs.value) else: self.mc.sthx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.stb(value_loc.value, base_loc.value, ofs.value) else: @@ -736,18 +738,35 @@ else: assert 0, "size not supported" - def emit_setfield_gc(self, op, arglocs, regalloc): - value_loc, base_loc, ofs, size = arglocs - self._write_to_mem(value_loc, base_loc, ofs, size) + def emit_gc_store(self, op, arglocs, regalloc): + value_loc, base_loc, ofs_loc, size_loc = arglocs + self._write_to_mem(value_loc, base_loc, ofs_loc, size_loc) - emit_setfield_raw = emit_setfield_gc - emit_zero_ptr_field = emit_setfield_gc + def _apply_offset(self, index_loc, ofs_loc): + # If offset != 0 then we have to add it here. Note that + # mc.addi() would not be valid with operand r0. + assert ofs_loc.is_imm() # must be an immediate... + assert _check_imm_arg(ofs_loc.getint()) # ...that fits 16 bits + assert index_loc is not r.SCRATCH2 + # (simplified version of _apply_scale()) + if ofs_loc.value > 0: + self.mc.addi(r.SCRATCH2.value, index_loc.value, ofs_loc.value) + index_loc = r.SCRATCH2 + return index_loc - def _load_from_mem(self, res, base_loc, ofs, size, signed): + def emit_gc_store_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, value_loc, ofs_loc, size_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._write_to_mem(value_loc, base_loc, index_loc, size_loc) + + def _load_from_mem(self, res, base_loc, ofs, size_loc, sign_loc): # res, base_loc, ofs, size and signed are all locations assert base_loc is not r.SCRATCH - sign = signed.value - if size.value == 8: + assert size_loc.is_imm() + size = size_loc.value + assert sign_loc.is_imm() + sign = sign_loc.value + if size == 8: if res.is_fp_reg(): if ofs.is_imm(): self.mc.lfd(res.value, base_loc.value, ofs.value) @@ -758,7 +777,7 @@ self.mc.ld(res.value, base_loc.value, ofs.value) else: self.mc.ldx(res.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if IS_PPC_64 and sign: if ofs.is_imm(): self.mc.lwa(res.value, base_loc.value, ofs.value) @@ -769,7 +788,7 @@ self.mc.lwz(res.value, base_loc.value, ofs.value) else: self.mc.lwzx(res.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if sign: if ofs.is_imm(): self.mc.lha(res.value, base_loc.value, ofs.value) @@ -780,7 +799,7 @@ self.mc.lhz(res.value, base_loc.value, ofs.value) else: self.mc.lhzx(res.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.lbz(res.value, base_loc.value, ofs.value) else: @@ -790,22 +809,28 @@ else: assert 0, "size not supported" - def _genop_getfield(self, op, arglocs, regalloc): - base_loc, ofs, res, size, sign = arglocs - self._load_from_mem(res, base_loc, ofs, size, sign) + def _genop_gc_load(self, op, arglocs, regalloc): + base_loc, ofs_loc, res_loc, size_loc, sign_loc = arglocs + self._load_from_mem(res_loc, base_loc, ofs_loc, size_loc, sign_loc) - emit_getfield_gc_i = _genop_getfield - emit_getfield_gc_r = _genop_getfield - emit_getfield_gc_f = _genop_getfield - emit_getfield_gc_pure_i = _genop_getfield - emit_getfield_gc_pure_r = _genop_getfield - emit_getfield_gc_pure_f = _genop_getfield - emit_getfield_raw_i = _genop_getfield - emit_getfield_raw_f = _genop_getfield + emit_gc_load_i = _genop_gc_load + emit_gc_load_r = _genop_gc_load + emit_gc_load_f = _genop_gc_load + + def _genop_gc_load_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, res_loc, ofs_loc, size_loc, sign_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._load_from_mem(res_loc, base_loc, index_loc, size_loc, sign_loc) + + emit_gc_load_indexed_i = _genop_gc_load_indexed + emit_gc_load_indexed_r = _genop_gc_load_indexed + emit_gc_load_indexed_f = _genop_gc_load_indexed SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment assert loc.is_reg() if multiply_by == 1: return loc @@ -827,6 +852,9 @@ return scratch_loc def _apply_scale(self, ofs, index_loc, itemsize): + # XXX should die now that getarrayitem and getinteriorfield are gone + # but can't because of emit_zero_array() at the moment + # For arrayitem and interiorfield reads and writes: this returns an # offset suitable for use in ld/ldx or similar instructions. # The result will be either the register r2 or a 16-bit immediate. @@ -857,44 +885,6 @@ index_loc = r.SCRATCH2 return index_loc - def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc): - (base_loc, index_loc, res_loc, ofs_loc, - itemsize, fieldsize, fieldsign) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign) - - emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield - - def emit_setinteriorfield_gc(self, op, arglocs, regalloc): - (base_loc, index_loc, value_loc, ofs_loc, - itemsize, fieldsize) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._write_to_mem(value_loc, base_loc, ofs_loc, fieldsize) - - emit_setinteriorfield_raw = emit_setinteriorfield_gc - - def emit_arraylen_gc(self, op, arglocs, regalloc): - res, base_loc, ofs = arglocs - self.mc.load(res.value, base_loc.value, ofs.value) - - emit_setarrayitem_gc = emit_setinteriorfield_gc - emit_setarrayitem_raw = emit_setarrayitem_gc - - emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield - - emit_raw_store = emit_setarrayitem_gc - emit_raw_load_i = _genop_getarray_or_interiorfield - emit_raw_load_f = _genop_getarray_or_interiorfield - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) @@ -998,10 +988,6 @@ _mixin_ = True - emit_strlen = FieldOpAssembler._genop_getfield - emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc - def emit_copystrcontent(self, op, arglocs, regalloc): self._emit_copycontent(arglocs, is_unicode=False) @@ -1059,12 +1045,8 @@ class UnicodeOpAssembler(object): - _mixin_ = True - - emit_unicodelen = FieldOpAssembler._genop_getfield - emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc + # empty! class AllocOpAssembler(object): diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -17,12 +17,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import ArrayDescr +from rpython.jit.backend.llsupport.descr import unpack_arraydescr import rpython.jit.backend.ppc.register as r import rpython.jit.backend.ppc.condition as c -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print @@ -691,159 +688,69 @@ src_locations2, dst_locations2, fptmploc) return [] - def prepare_setfield_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) + def prepare_gc_store(self, op): base_loc = self.ensure_reg(op.getarg(0)) - value_loc = self.ensure_reg(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [value_loc, base_loc, ofs_loc, imm(size)] + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + size_loc = self.ensure_reg_or_any_imm(op.getarg(3)) + return [value_loc, base_loc, ofs_loc, size_loc] - prepare_setfield_raw = prepare_setfield_gc + def _prepare_gc_load(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + self.free_op_vars() + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(2) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, ofs_loc, res_loc, size_loc, imm(sign)] - def _prepare_getfield(self, op): - ofs, size, sign = unpack_fielddescr(op.getdescr()) + prepare_gc_load_i = _prepare_gc_load + prepare_gc_load_r = _prepare_gc_load + prepare_gc_load_f = _prepare_gc_load + + def prepare_gc_store_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + assert op.getarg(3).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(4)) + assert ofs_loc.is_imm() # the arg(4) should always be a small constant + size_loc = self.ensure_reg_or_any_imm(op.getarg(5)) + return [base_loc, index_loc, value_loc, ofs_loc, size_loc] + + def _prepare_gc_load_indexed(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + assert op.getarg(2).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(3)) + assert ofs_loc.is_imm() # the arg(3) should always be a small constant self.free_op_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size), imm(sign)] + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(4) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, index_loc, res_loc, ofs_loc, size_loc, imm(sign)] - prepare_getfield_gc_i = _prepare_getfield - prepare_getfield_gc_r = _prepare_getfield - prepare_getfield_gc_f = _prepare_getfield - prepare_getfield_raw_i = _prepare_getfield - prepare_getfield_raw_f = _prepare_getfield - prepare_getfield_gc_pure_i = _prepare_getfield - prepare_getfield_gc_pure_r = _prepare_getfield - prepare_getfield_gc_pure_f = _prepare_getfield + prepare_gc_load_indexed_i = _prepare_gc_load_indexed + prepare_gc_load_indexed_r = _prepare_gc_load_indexed + prepare_gc_load_indexed_f = _prepare_gc_load_indexed def prepare_increment_debug_counter(self, op): base_loc = self.ensure_reg(op.getarg(0)) temp_loc = r.SCRATCH2 return [base_loc, temp_loc] - def _prepare_getinteriorfield(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(itemsize), imm(fieldsize), imm(sign)] - - prepare_getinteriorfield_gc_i = _prepare_getinteriorfield - prepare_getinteriorfield_gc_r = _prepare_getinteriorfield - prepare_getinteriorfield_gc_f = _prepare_getinteriorfield - - def prepare_setinteriorfield_gc(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, _ = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(itemsize), imm(fieldsize)] - - prepare_setinteriorfield_raw = prepare_setinteriorfield_gc - - def prepare_arraylen_gc(self, op): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - assert _check_imm_arg(ofs) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_setarrayitem_gc(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - imm_size = imm(size) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - - prepare_setarrayitem_raw = prepare_setarrayitem_gc - - def prepare_raw_store(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(1), imm(size)] - - def _prepare_getarrayitem(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(size) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(sign)] - - prepare_getarrayitem_gc_i = _prepare_getarrayitem - prepare_getarrayitem_gc_r = _prepare_getarrayitem - prepare_getarrayitem_gc_f = _prepare_getarrayitem - prepare_getarrayitem_raw_i = _prepare_getarrayitem - prepare_getarrayitem_raw_f = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem - - def _prepare_raw_load(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(1), imm(size), imm(sign)] - - prepare_raw_load_i = _prepare_raw_load - prepare_raw_load_f = _prepare_raw_load - - def prepare_strlen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_strgetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_strsetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0)) dst_ptr_loc = self.ensure_reg(op.getarg(1)) @@ -856,37 +763,6 @@ prepare_copyunicodecontent = prepare_copystrcontent - def prepare_unicodelen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_unicodegetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_unicodesetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - prepare_same_as_i = helper.prepare_unary_op prepare_same_as_r = helper.prepare_unary_op prepare_same_as_f = helper.prepare_unary_op @@ -1078,12 +954,6 @@ arglocs = self._prepare_guard(op) return arglocs - def prepare_zero_ptr_field(self, op): - base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) - value_loc = self.ensure_reg(ConstInt(0)) - return [value_loc, base_loc, ofs_loc, imm(WORD)] - def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -21,6 +21,9 @@ IS_64_BIT = True backend_name = 'ppc64' + # can an ISA instruction handle a factor to the offset? + load_supported_factors = (1,) + from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE frame_reg = r.SP all_reg_indexes = [-1] * 32 diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -4,8 +4,7 @@ import os, sys from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, - unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) +from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, @@ -1086,9 +1085,9 @@ result_loc = self.force_allocate_reg(op) size_box = op.getarg(2) assert isinstance(size_box, ConstInt) - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 @@ -1111,9 +1110,9 @@ assert isinstance(size_box, ConstInt) scale = scale_box.value offset = offset_box.value - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -92,6 +92,12 @@ # fill in the jf_descr and jf_gcmap fields of the frame according # to which failure we are resuming from. These are set before # this function is called (see generate_quick_failure()). + + ofs = self.cpu.get_ofs_of_frame_field('jf_descr') + ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') + self.mc.STG(r.SCRATCH2, l.addr(ofs2, r.SPP)) + self.mc.STG(r.SCRATCH, l.addr(ofs, r.SPP)) + self._push_core_regs_to_jitframe(mc) if withfloats: self._push_fp_regs_to_jitframe(mc) @@ -123,13 +129,10 @@ assert target != 0 pool_offset = guardtok._pool_offset - ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') # overwrite the gcmap in the jitframe offset = pool_offset + RECOVERY_GCMAP_POOL_OFFSET - self.mc.LG(r.SCRATCH, l.pool(offset)) - self.mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) + self.mc.LG(r.SCRATCH2, l.pool(offset)) # overwrite the target in pool offset = pool_offset + RECOVERY_TARGET_POOL_OFFSET @@ -138,7 +141,6 @@ self.mc.load_imm(r.SCRATCH, fail_descr) #self.mc.LGFI(r.SCRATCH, l.imm(fail_descr)) - self.mc.STG(r.SCRATCH, l.addr(ofs, r.SPP)) self.mc.BCR(l.imm(0xf), r.r14) return startpos diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -133,7 +133,7 @@ self.TRAP2() def trace(self): - pass + self.SVC(l.imm(142)) #self.LGHI(r.r2, 17) #self.XGR(r.r3, r.r3) #self.SVC(l.imm(17)) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -747,6 +747,48 @@ def prepare_call_malloc_gc(self, op): return self._prepare_call_default(op) + def prepare_call_malloc_nursery(self, op): + xxx + self.rm.force_allocate_reg(op, selected_reg=r.RES) + self.rm.temp_boxes.append(op) + tmp_box = TempInt() + self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ) + self.rm.temp_boxes.append(tmp_box) + return [] + + def prepare_call_malloc_nursery_varsize_frame(self, op): + xxx + sizeloc = self.ensure_reg(op.getarg(0)) + # sizeloc must be in a register, but we can free it now + # (we take care explicitly of conflicts with r.RES or r.RSZ) + self.free_op_vars() + # the result will be in r.RES + self.rm.force_allocate_reg(op, selected_reg=r.RES) + self.rm.temp_boxes.append(op) + # we need r.RSZ as a temporary + tmp_box = TempInt() + self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ) + self.rm.temp_boxes.append(tmp_box) + return [sizeloc] + + def prepare_call_malloc_nursery_varsize(self, op): + xxx + # the result will be in r.RES + self.rm.force_allocate_reg(op, selected_reg=r.RES) + self.rm.temp_boxes.append(op) + # we need r.RSZ as a temporary + tmp_box = TempInt() + self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ) + self.rm.temp_boxes.append(tmp_box) + # length_box always survives: it's typically also present in the + # next operation that will copy it inside the new array. Make + # sure it is in a register different from r.RES and r.RSZ. (It + # should not be a ConstInt at all.) + length_box = op.getarg(2) + lengthloc = self.ensure_reg(length_box) + return [lengthloc] + + def _prepare_gc_load(self, op): base_loc = self.ensure_reg(op.getarg(0), force_in_reg=True) index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1021,18 +1021,20 @@ kind = getkind(op.result.concretetype)[0] return SpaceOperation('getinteriorfield_gc_%s' % kind, args, op.result) - elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): - # special-case 2: GcStruct with Array field - v_inst, c_field, v_index = op.args - STRUCT = v_inst.concretetype.TO - ARRAY = getattr(STRUCT, c_field.value) - assert isinstance(ARRAY, lltype.Array) - arraydescr = self.cpu.arraydescrof(STRUCT) - kind = getkind(op.result.concretetype)[0] - assert kind in ('i', 'f') - return SpaceOperation('getarrayitem_gc_%s' % kind, - [op.args[0], v_index, arraydescr], - op.result) + #elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): + # # special-case 2: GcStruct with Array field + # ---was added in the faster-rstruct branch,--- + # ---no longer directly supported--- + # v_inst, c_field, v_index = op.args + # STRUCT = v_inst.concretetype.TO + # ARRAY = getattr(STRUCT, c_field.value) + # assert isinstance(ARRAY, lltype.Array) + # arraydescr = self.cpu.arraydescrof(STRUCT) + # kind = getkind(op.result.concretetype)[0] + # assert kind in ('i', 'f') + # return SpaceOperation('getarrayitem_gc_%s' % kind, + # [op.args[0], v_index, arraydescr], + # op.result) else: assert False, 'not supported' @@ -1084,6 +1086,25 @@ return SpaceOperation('raw_load_%s' % kind, [op.args[0], op.args[1], descr], op.result) + def rewrite_op_gc_load_indexed(self, op): + T = op.result.concretetype + kind = getkind(T)[0] + assert kind != 'r' + descr = self.cpu.arraydescrof(rffi.CArray(T)) + if (not isinstance(op.args[2], Constant) or + not isinstance(op.args[3], Constant)): + raise NotImplementedError("gc_load_indexed: 'scale' and 'base_ofs'" + " should be constants") + # xxx hard-code the size in bytes at translation time, which is + # probably fine and avoids lots of issues later + bytes = descr.get_item_size_in_bytes() + if descr.is_item_signed(): + bytes = -bytes + c_bytes = Constant(bytes, lltype.Signed) + return SpaceOperation('gc_load_indexed_%s' % kind, + [op.args[0], op.args[1], + op.args[2], op.args[3], c_bytes], op.result) + def _rewrite_equality(self, op, opname): arg0, arg1 = op.args if isinstance(arg0, Constant) and not arg0.value: diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1434,6 +1434,13 @@ def bhimpl_raw_load_f(cpu, addr, offset, arraydescr): return cpu.bh_raw_load_f(addr, offset, arraydescr) + @arguments("cpu", "r", "i", "i", "i", "i", returns="i") + def bhimpl_gc_load_indexed_i(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_i(addr, index,scale,base_ofs, bytes) + @arguments("cpu", "r", "i", "i", "i", "i", returns="f") + def bhimpl_gc_load_indexed_f(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_f(addr, index,scale,base_ofs, bytes) + @arguments("r", "d", "d") def bhimpl_record_quasiimmut_field(struct, fielddescr, mutatefielddescr): pass diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -948,6 +948,7 @@ # the virtualrefs and virtualizable have been forced by # handle_async_forcing() just a moment ago. from rpython.jit.metainterp.blackhole import resume_in_blackhole + import pdb; pdb.set_trace() hidden_all_virtuals = metainterp_sd.cpu.get_savedata_ref(deadframe) obj = AllVirtuals.show(metainterp_sd.cpu, hidden_all_virtuals) all_virtuals = obj.cache diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -535,16 +535,10 @@ cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem it might happen that op.getarg(0) is - # a virtual string, NOT an array. In that case, we cannot cache the - # getarrayitem as if it were an array, obviously. In theory we could - # improve by writing special code to interpter the buffer of the - # virtual string as if it were an array, but it looks complicate, - # fragile and not worth it. arrayinfo = self.ensure_ptr_info_arg0(op) indexb = self.getintbound(op.getarg(1)) cf = None - if indexb.is_constant() and not arrayinfo.is_vstring(): + if indexb.is_constant(): index = indexb.getint() arrayinfo.getlenbound(None).make_gt_const(index) # use the cache on (arraydescr, index), which is a constant @@ -561,7 +555,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # the remember the result of reading the array item - if cf is not None and not arrayinfo.is_vstring(): + if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), self.get_box_replacement(op), cf, diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -24,9 +24,6 @@ def is_virtual(self): return False - def is_vstring(self): - return False - def is_precise(self): return False diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -277,10 +277,8 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem we op.getarg(0) is a string, NOT an - # array, hence the check. In that case, it will be forced opinfo = self.getptrinfo(op.getarg(0)) - if opinfo and opinfo.is_virtual() and not opinfo.is_vstring(): + if opinfo and opinfo.is_virtual(): indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: item = opinfo.getitem(op.getdescr(), indexbox.getint()) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -62,9 +62,6 @@ self.mode = mode self.length = length - def is_vstring(self): - return True - def getlenbound(self, mode): from rpython.jit.metainterp.optimizeopt import intutils diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -810,6 +810,27 @@ return self.execute_with_descr(rop.RAW_LOAD_F, arraydescr, addrbox, offsetbox) + def _remove_symbolics(self, c): + if not we_are_translated(): + from rpython.rtyper.lltypesystem import ll2ctypes + assert isinstance(c, ConstInt) + c = ConstInt(ll2ctypes.lltype2ctypes(c.value)) + return c + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_i(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_I, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_f(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_F, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + @arguments("box") def opimpl_hint_force_virtualizable(self, box): self.metainterp.gen_store_back_in_vable(box) diff --git a/rpython/jit/metainterp/test/test_strstorage.py b/rpython/jit/metainterp/test/test_strstorage.py --- a/rpython/jit/metainterp/test/test_strstorage.py +++ b/rpython/jit/metainterp/test/test_strstorage.py @@ -19,7 +19,7 @@ res = self.interp_operations(f, [], supports_singlefloats=True) # kind = getkind(TYPE)[0] # 'i' or 'f' - self.check_operations_history({'getarrayitem_gc_%s' % kind: 1, + self.check_operations_history({'gc_load_indexed_%s' % kind: 1, 'finish': 1}) # if TYPE == lltype.SingleFloat: @@ -29,8 +29,8 @@ return longlong.int2singlefloat(res) return res - def str_storage_supported(self, TYPE): - py.test.skip('this is not a JIT test') + #def str_storage_supported(self, TYPE): + # py.test.skip('this is not a JIT test') def test_force_virtual_str_storage(self): byteorder = sys.byteorder @@ -48,6 +48,6 @@ 'strsetitem': 1, # str forcing 'call_pure_r': 1, # str forcing (copystrcontent) 'guard_no_exception': 1, # str forcing - 'getarrayitem_gc_i': 1, # str_storage_getitem + 'gc_load_indexed_i': 1, # str_storage_getitem 'finish': 1 }) diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -97,6 +97,18 @@ def __init__(self, buffer, offset, size): self.readonly = buffer.readonly + if isinstance(buffer, SubBuffer): # don't nest them + # we want a view (offset, size) over a view + # (buffer.offset, buffer.size) over buffer.buffer. + # Note that either '.size' can be -1 to mean 'up to the end'. + at_most = buffer.getlength() - offset + if size > at_most or size < 0: + if at_most < 0: + at_most = 0 + size = at_most + offset += buffer.offset + buffer = buffer.buffer + # self.buffer = buffer self.offset = offset self.size = size diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -1,4 +1,4 @@ -secondary_entrypoints = {} +secondary_entrypoints = {"main": []} import py from rpython.rtyper.lltypesystem import lltype, rffi @@ -109,20 +109,3 @@ "you. Another difference is that entrypoint_highlevel() " "returns the normal Python function, which can be safely " "called from more Python code.") - - -# the point of dance below is so the call to rpython_startup_code actually -# does call asm_stack_bottom. It's here because there is no other good place. -# This thing is imported by any target which has any API, so it'll get -# registered - -RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True, - random_effects_on_gcobjs=True) - - at entrypoint_lowlevel('main', [], c_name='rpython_startup_code') -def rpython_startup_code(): - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py - RPython_StartupCode() - rffi.stackcounter.stacks_counter -= 1 diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -114,6 +114,8 @@ specialize = _Specialize() +NOT_CONSTANT = object() # to use in enforceargs() + def enforceargs(*types_, **kwds): """ Decorate a function with forcing of RPython-level types on arguments. None means no enforcing. @@ -333,6 +335,25 @@ # XXX this can be made more efficient in the future return bytearray(str(i)) +def fetch_translated_config(): + """Returns the config that is current when translating. + Returns None if not translated. + """ + return None + +class Entry(ExtRegistryEntry): + _about_ = fetch_translated_config + + def compute_result_annotation(self): + config = self.bookkeeper.annotator.translator.config + return self.bookkeeper.immutablevalue(config) + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + translator = hop.rtyper.annotator.translator + hop.exception_cannot_occur() + return hop.inputconst(lltype.Void, translator.config) + # ____________________________________________________________ class FREED_OBJECT(object): diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2794,8 +2794,10 @@ def parse_digit_string(parser): # helper for fromstr + base = parser.base + if (base & (base - 1)) == 0: + return parse_string_from_binary_base(parser) a = rbigint() - base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 while True: @@ -2811,3 +2813,52 @@ tens *= base a.sign *= parser.sign return a + +def parse_string_from_binary_base(parser): + # The point to this routine is that it takes time linear in the number of + # string characters. + from rpython.rlib.rstring import ParseStringError + + base = parser.base + if base == 2: bits_per_char = 1 + elif base == 4: bits_per_char = 2 + elif base == 8: bits_per_char = 3 + elif base == 16: bits_per_char = 4 + elif base == 32: bits_per_char = 5 + else: + raise AssertionError + + # n <- total number of bits needed, while moving 'parser' to the end + n = 0 + while parser.next_digit() >= 0: + n += 1 + + # b <- number of Python digits needed, = ceiling(n/SHIFT). */ + try: + b = ovfcheck(n * bits_per_char) + b = ovfcheck(b + (SHIFT - 1)) + except OverflowError: + raise ParseStringError("long string too large to convert") + b = (b // SHIFT) or 1 + z = rbigint([NULLDIGIT] * b, sign=parser.sign) + + # Read string from right, and fill in long from left; i.e., + # from least to most significant in both. + accum = _widen_digit(0) + bits_in_accum = 0 + pdigit = 0 + for _ in range(n): + k = parser.prev_digit() + accum |= _widen_digit(k) << bits_in_accum + bits_in_accum += bits_per_char + if bits_in_accum >= SHIFT: + z.setdigit(pdigit, accum) + pdigit += 1 + assert pdigit <= b + accum >>= SHIFT + bits_in_accum -= SHIFT + + if bits_in_accum: + z.setdigit(pdigit, accum) + z._normalize() + return z diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -9,7 +9,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.rarithmetic import intmask, widen from rpython.rlib.objectmodel import ( - specialize, enforceargs, register_replacement_for) + specialize, enforceargs, register_replacement_for, NOT_CONSTANT) from rpython.rlib.signature import signature from rpython.rlib import types from rpython.annotator.model import s_Str0 @@ -415,7 +415,7 @@ @replace_os_function('open') @specialize.argtype(0) - at enforceargs(None, int, int, typecheck=False) + at enforceargs(NOT_CONSTANT, int, int, typecheck=False) def open(path, flags, mode): if _prefer_unicode(path): fd = c_wopen(_as_unicode0(path), flags, mode) diff --git a/rpython/rlib/rshrinklist.py b/rpython/rlib/rshrinklist.py --- a/rpython/rlib/rshrinklist.py +++ b/rpython/rlib/rshrinklist.py @@ -6,6 +6,8 @@ The twist is that occasionally append() will throw away the items for which must_keep() returns False. (It does so without changing the order.) + + See also rpython.rlib.rweaklist. """ _mixin_ = True diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -485,6 +485,24 @@ else: return -1 + def prev_digit(self): + # After exhausting all n digits in next_digit(), you can walk them + # again in reverse order by calling prev_digit() exactly n times + i = self.i - 1 + assert i >= 0 + self.i = i + c = self.s[i] + digit = ord(c) + if '0' <= c <= '9': + digit -= ord('0') + elif 'A' <= c <= 'Z': + digit = (digit - ord('A')) + 10 + elif 'a' <= c <= 'z': + digit = (digit - ord('a')) + 10 + else: + raise AssertionError + return digit + # -------------- public API --------------------------------- INIT_SIZE = 100 # XXX tweak diff --git a/rpython/rlib/rstruct/nativefmttable.py b/rpython/rlib/rstruct/nativefmttable.py --- a/rpython/rlib/rstruct/nativefmttable.py +++ b/rpython/rlib/rstruct/nativefmttable.py @@ -11,7 +11,6 @@ from rpython.rlib.rstruct.standardfmttable import native_is_bigendian from rpython.rlib.rstruct.error import StructError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rstruct/standardfmttable.py b/rpython/rlib/rstruct/standardfmttable.py --- a/rpython/rlib/rstruct/standardfmttable.py +++ b/rpython/rlib/rstruct/standardfmttable.py @@ -12,7 +12,7 @@ from rpython.rlib.rstruct import ieee from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported +from rpython.rlib.strstorage import str_storage_getitem from rpython.rlib import rarithmetic from rpython.rtyper.lltypesystem import rffi @@ -185,13 +185,14 @@ data = fmtiter.read(size) fmtiter.appendobj(ieee.unpack_float(data, fmtiter.bigendian)) return - if not str_storage_supported(TYPE): - # this happens e.g. on win32 and ARM32: we cannot read the string - # content as an array of doubles because it's not properly - # aligned. But we can read a longlong and convert to float - assert TYPE == rffi.DOUBLE - assert rffi.sizeof(TYPE) == 8 - return unpack_longlong2float(fmtiter) + ## XXX check if the following code is still needed + ## if not str_storage_supported(TYPE): + ## # this happens e.g. on win32 and ARM32: we cannot read the string + ## # content as an array of doubles because it's not properly + ## # aligned. But we can read a longlong and convert to float + ## assert TYPE == rffi.DOUBLE + ## assert rffi.sizeof(TYPE) == 8 + ## return unpack_longlong2float(fmtiter) try: # fast path val = unpack_fastpath(TYPE)(fmtiter) @@ -246,7 +247,7 @@ @specialize.argtype(0) def unpack_int_fastpath_maybe(fmtiter): - if fmtiter.bigendian != native_is_bigendian or not str_storage_supported(TYPE): + if fmtiter.bigendian != native_is_bigendian or not native_is_ieee754: ## or not str_storage_supported(TYPE): return False try: intvalue = unpack_fastpath(TYPE)(fmtiter) diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -291,8 +291,6 @@ # ____________________________________________________________ # # Thread-locals. -# KEEP THE REFERENCE ALIVE, THE GC DOES NOT FOLLOW THEM SO FAR! -# We use _make_sure_does_not_move() to make sure the pointer will not move. class ThreadLocalField(object): @@ -351,6 +349,11 @@ class ThreadLocalReference(ThreadLocalField): + # A thread-local that points to an object. The object stored in such + # a thread-local is kept alive as long as the thread is not finished + # (but only with our own GCs! it seems not to work with Boehm...) + # (also, on Windows, if you're not making a DLL but an EXE, it will + # leak the objects when a thread finishes; see threadlocal.c.) _COUNT = 1 def __init__(self, Cls, loop_invariant=False): @@ -378,20 +381,41 @@ assert isinstance(value, Cls) or value is None if we_are_translated(): from rpython.rtyper.annlowlevel import cast_instance_to_gcref - from rpython.rlib.rgc import _make_sure_does_not_move - from rpython.rlib.objectmodel import running_on_llinterp gcref = cast_instance_to_gcref(value) - if not running_on_llinterp: - if gcref: - _make_sure_does_not_move(gcref) value = lltype.cast_ptr_to_int(gcref) setraw(value) + rgc.register_custom_trace_hook(TRACETLREF, _lambda_trace_tlref) + rgc.ll_writebarrier(_tracetlref_obj) else: self.local.value = value self.get = get self.set = set + def _trace_tlref(gc, obj, callback, arg): + p = llmemory.NULL + llop.threadlocalref_acquire(lltype.Void) + while True: + p = llop.threadlocalref_enum(llmemory.Address, p) + if not p: + break + gc._trace_callback(callback, arg, p + offset) + llop.threadlocalref_release(lltype.Void) + _lambda_trace_tlref = lambda: _trace_tlref + TRACETLREF = lltype.GcStruct('TRACETLREF') + _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) + + @staticmethod + def automatic_keepalive(config): + """Returns True if translated with a GC that keeps alive + the set() value until the end of the thread. Returns False + if you need to keep it alive yourself (but in that case, you + should also reset it to None before the thread finishes). + """ + return (config.translation.gctransformer == "framework" and + # see translator/c/src/threadlocal.c for the following line + (not _win32 or config.translation.shared)) + tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", loop_invariant=True) @@ -399,7 +423,8 @@ loop_invariant=True) tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") tlfield_alt_errno = ThreadLocalField(rffi.INT, "alt_errno") -if sys.platform == "win32": +_win32 = (sys.platform == "win32") +if _win32: from rpython.rlib import rwin32 tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") tlfield_alt_lasterror = ThreadLocalField(rwin32.DWORD, "alt_lasterror") diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py --- a/rpython/rlib/rweaklist.py +++ b/rpython/rlib/rweaklist.py @@ -5,6 +5,13 @@ class RWeakListMixin(object): + """A mixin base class. A collection that weakly maps indexes to objects. + After an object goes away, its index is marked free and will be reused + by some following add_handle() call. So add_handle() might not append + the object at the end of the list, but can put it anywhere. + + See also rpython.rlib.rshrinklist. + """ _mixin_ = True def initialize(self): diff --git a/rpython/rlib/strstorage.py b/rpython/rlib/strstorage.py --- a/rpython/rlib/strstorage.py +++ b/rpython/rlib/strstorage.py @@ -9,54 +9,31 @@ # rstr.py:copy_string_contents), which has no chance to work during # tracing # -# 2. use llop.raw_load: despite the name, llop.raw_load DOES support reading -# from GC pointers. However: -# -# a. we would like to use a CompositeOffset as the offset (using the -# same logic as in rstr.py:_get_raw_str_buf), but this is not (yet) -# supported before translation: it works only if you pass an actual -# integer -# -# b. raw_load from a GC pointer is not (yet) supported by the -# JIT. There are plans to introduce a gc_load operation: when it -# will be there, we could fix the issue above and actually use it to -# implement str_storage_getitem -# -# 3. the actual solution: cast rpy_string to a GcStruct which has the very +# 2. cast rpy_string to a GcStruct which has the very # same layout, with the only difference that its 'chars' field is no # longer an Array(Char) but e.e. an Array(Signed). Then, we just need to -# read the appropriate index into the array From pypy.commits at gmail.com Wed Jan 6 07:31:04 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 04:31:04 -0800 (PST) Subject: [pypy-commit] pypy default: 32-bit fix Message-ID: <568d0908.11301c0a.efb2b.ffffb1b2@mx.google.com> Author: Armin Rigo Branch: Changeset: r81600:c0170b8797bc Date: 2016-01-06 13:30 +0100 http://bitbucket.org/pypy/pypy/changeset/c0170b8797bc/ Log: 32-bit fix diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -722,7 +722,8 @@ def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): if bytes != 8: raise Exception("gc_load_indexed_f is only for 'double'!") - return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale, base_ofs) + return llop.gc_load_indexed(longlong.FLOATSTORAGE, + struct, index, scale, base_ofs) def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -399,9 +399,7 @@ rop.GC_LOAD_I, rop.GC_LOAD_R, rop.GC_LOAD_F, - rop.GC_LOAD_INDEXED_I, rop.GC_LOAD_INDEXED_R, - rop.GC_LOAD_INDEXED_F, rop.GC_STORE, rop.GC_STORE_INDEXED, ): # list of opcodes never executed by pyjitpl From pypy.commits at gmail.com Wed Jan 6 08:53:22 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 06 Jan 2016 05:53:22 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed debug statement, r2 is the return register on the s390x not r3 Message-ID: <568d1c52.41dd1c0a.9e531.ffffd6a3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81601:6151e73da389 Date: 2016-01-06 14:52 +0100 http://bitbucket.org/pypy/pypy/changeset/6151e73da389/ Log: removed debug statement, r2 is the return register on the s390x not r3 diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -113,10 +113,10 @@ def push_gcmap(self): # we push *now* the gcmap, describing the status of GC registers # after the rearrangements done just before, ignoring the return - # value r3, if necessary + # value r2, if necessary assert not self.is_call_release_gil noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() - gcmap = self.asm._regalloc.get_gcmap([r.r3], noregs=noregs) + gcmap = self.asm._regalloc.get_gcmap([r.r2], noregs=noregs) self.asm.push_gcmap(self.mc, gcmap, store=True) def pop_gcmap(self): diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -948,7 +948,6 @@ # the virtualrefs and virtualizable have been forced by # handle_async_forcing() just a moment ago. from rpython.jit.metainterp.blackhole import resume_in_blackhole - import pdb; pdb.set_trace() hidden_all_virtuals = metainterp_sd.cpu.get_savedata_ref(deadframe) obj = AllVirtuals.show(metainterp_sd.cpu, hidden_all_virtuals) all_virtuals = obj.cache From pypy.commits at gmail.com Wed Jan 6 15:50:24 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 12:50:24 -0800 (PST) Subject: [pypy-commit] cffi default: Add a passing test about "from mymodule import *" Message-ID: <568d7e10.4473c20a.ddf72.fffff1b5@mx.google.com> Author: Armin Rigo Branch: Changeset: r2531:4b3ae105e768 Date: 2016-01-06 21:38 +0100 http://bitbucket.org/cffi/cffi/changeset/4b3ae105e768/ Log: Add a passing test about "from mymodule import *" diff --git a/testing/cffi1/test_new_ffi_1.py b/testing/cffi1/test_new_ffi_1.py --- a/testing/cffi1/test_new_ffi_1.py +++ b/testing/cffi1/test_new_ffi_1.py @@ -1718,3 +1718,10 @@ exec("from _test_import_from_lib.lib import *", d) assert (set(key for key in d if not key.startswith('_')) == set(['myfunc', 'MYFOO'])) + # + # also test "import *" on the module itself, which should be + # equivalent to "import ffi, lib" + d = {} + exec("from _test_import_from_lib import *", d) + assert (sorted([x for x in d.keys() if not x.startswith('__')]) == + ['ffi', 'lib']) From pypy.commits at gmail.com Wed Jan 6 15:50:26 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 12:50:26 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: merge heads Message-ID: <568d7e12.624fc20a.13d6d.ffffcce3@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2532:4b2de9517c5d Date: 2016-01-06 21:50 +0100 http://bitbucket.org/cffi/cffi/changeset/4b2de9517c5d/ Log: merge heads diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -60,6 +60,7 @@ path = self.get_path() env = os.environ.copy() env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + print 'running %r in %r' % (name, path) popen = subprocess.Popen([name], cwd=path, env=env, stdout=subprocess.PIPE) result = popen.stdout.read() From pypy.commits at gmail.com Wed Jan 6 15:50:22 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 06 Jan 2016 12:50:22 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Check that the module is present in sys.modules at this point, Message-ID: <568d7e0e.552f1c0a.96293.69ce@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2530:15ecadc24cb7 Date: 2016-01-06 21:28 +0100 http://bitbucket.org/cffi/cffi/changeset/15ecadc24cb7/ Log: Check that the module is present in sys.modules at this point, and that it is re-importable diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py --- a/testing/embedding/add2.py +++ b/testing/embedding/add2.py @@ -10,6 +10,13 @@ import sys sys.stdout.write("prepADD2\n") + assert '_add2_cffi' in sys.modules + m = sys.modules['_add2_cffi'] + import _add2_cffi + assert m is _add2_cffi + assert _add2_cffi.ffi is ffi + assert _add2_cffi.lib is lib + @ffi.def_extern() def add2(x, y, z): sys.stdout.write("adding %d and %d and %d\n" % (x, y, z)) From pypy.commits at gmail.com Thu Jan 7 04:09:54 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 01:09:54 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Stop running the embedding_init_code() code as if it was part of the Message-ID: <568e2b62.42661c0a.c9342.1fe9@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2533:1c789b36273d Date: 2016-01-07 10:09 +0100 http://bitbucket.org/cffi/cffi/changeset/1c789b36273d/ Log: Stop running the embedding_init_code() code as if it was part of the extension module. Instead, be explicit and require a "from xx import ffi" line. This is clearer because it is the same line needed at the start of other modules, if the logic becomes too large for this single triple-quoted string. diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -120,7 +120,7 @@ */ int result; PyGILState_STATE state; - PyObject *pycode=NULL, *m=NULL, *global_dict, *x; + PyObject *pycode=NULL, *global_dict=NULL, *x; /* Acquire the GIL. We have no threadstate here. If Python is already initialized, it is possible that there is already one @@ -165,17 +165,15 @@ /* Now run the Python code provided to ffi.embedding_init_code(). */ - m = PyImport_ImportModule(_CFFI_MODULE_NAME); - if (m == NULL) - goto error; pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE, "", Py_file_input); if (pycode == NULL) goto error; - global_dict = PyModule_GetDict(m); - if (PyDict_GetItemString(global_dict, "__builtins__") == NULL && - PyDict_SetItemString(global_dict, "__builtins__", + global_dict = PyDict_New(); + if (global_dict == NULL) + goto error; + if (PyDict_SetItemString(global_dict, "__builtins__", PyThreadState_GET()->interp->builtins) < 0) goto error; x = PyEval_EvalCode((PyCodeObject *)pycode, global_dict, global_dict); @@ -194,7 +192,7 @@ result = 0; done: Py_XDECREF(pycode); - Py_XDECREF(m); + Py_XDECREF(global_dict); PyGILState_Release(state); return result; diff --git a/demo/embedding.py b/demo/embedding.py --- a/demo/embedding.py +++ b/demo/embedding.py @@ -9,6 +9,7 @@ """, dllexport=True) ffi.embedding_init_code(""" + from _embedding_cffi import ffi print "preparing" # printed once @ffi.def_extern() diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py --- a/testing/embedding/add1.py +++ b/testing/embedding/add1.py @@ -15,6 +15,8 @@ sys.stdout.write(".") sys.stdout.write("\n") + from _add1_cffi import ffi + int(ord("A")) # check that built-ins are there @ffi.def_extern() diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py --- a/testing/embedding/add2.py +++ b/testing/embedding/add2.py @@ -13,9 +13,7 @@ assert '_add2_cffi' in sys.modules m = sys.modules['_add2_cffi'] import _add2_cffi - assert m is _add2_cffi - assert _add2_cffi.ffi is ffi - assert _add2_cffi.lib is lib + ffi = _add2_cffi.ffi @ffi.def_extern() def add2(x, y, z): diff --git a/testing/embedding/add3.py b/testing/embedding/add3.py --- a/testing/embedding/add3.py +++ b/testing/embedding/add3.py @@ -7,6 +7,7 @@ """, dllexport=True) ffi.embedding_init_code(r""" + from _add3_cffi import ffi import sys @ffi.def_extern() diff --git a/testing/embedding/add_recursive.py b/testing/embedding/add_recursive.py --- a/testing/embedding/add_recursive.py +++ b/testing/embedding/add_recursive.py @@ -8,6 +8,7 @@ """, dllexport=True) ffi.embedding_init_code(r""" + from _add_recursive_cffi import ffi, lib print "preparing REC" @ffi.def_extern() diff --git a/testing/embedding/perf.py b/testing/embedding/perf.py --- a/testing/embedding/perf.py +++ b/testing/embedding/perf.py @@ -7,6 +7,8 @@ """, dllexport=True) ffi.embedding_init_code(r""" + from _perf_cffi import ffi + @ffi.def_extern() def add1(x, y): return x + y diff --git a/testing/embedding/tlocal.py b/testing/embedding/tlocal.py --- a/testing/embedding/tlocal.py +++ b/testing/embedding/tlocal.py @@ -7,6 +7,7 @@ """, dllexport=True) ffi.embedding_init_code(r""" + from _tlocal_cffi import ffi import thread, itertools tloc = thread._local() g_seen = itertools.count() From pypy.commits at gmail.com Thu Jan 7 04:19:53 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 01:19:53 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: update to cffi/1c789b36273d Message-ID: <568e2db9.cb571c0a.c2023.27e2@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81602:455a5e6e72ea Date: 2016-01-07 10:18 +0100 http://bitbucket.org/pypy/pypy/changeset/455a5e6e72ea/ Log: update to cffi/1c789b36273d diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -45,4 +45,3 @@ w_modules_dict = space.sys.get('modules') space.setitem(w_modules_dict, w_name, space.wrap(module)) space.setitem(w_modules_dict, space.wrap(name + '.lib'), space.wrap(lib)) - return module diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -32,13 +32,12 @@ os_thread.setup_threads(space) # name = rffi.charp2str(init_struct.name) - module = load_cffi1_module(space, name, None, init_struct.func) + load_cffi1_module(space, name, None, init_struct.func) code = rffi.charp2str(init_struct.code) compiler = space.createcompiler() pycode = compiler.compile(code, "" % name, 'exec', 0) - w_globals = module.getdict(space) - space.call_method(w_globals, "setdefault", space.wrap("__builtins__"), - space.wrap(space.builtin)) + w_globals = space.newdict(module=True) + space.setitem_str(w_globals, "__builtins__", space.wrap(space.builtin)) pycode.exec_code(space, w_globals, w_globals) From pypy.commits at gmail.com Thu Jan 7 04:48:25 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 07 Jan 2016 01:48:25 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: need to save all registers before assembling call_release_gil*, accidentally put false to save_all_regs Message-ID: <568e3469.2457c20a.d9372.ffffb4f9@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81603:5d0c94086009 Date: 2016-01-07 09:27 +0100 http://bitbucket.org/pypy/pypy/changeset/5d0c94086009/ Log: need to save all registers before assembling call_release_gil*, accidentally put false to save_all_regs diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -905,13 +905,12 @@ prepare_call_may_force_n = _prepare_call_may_force def _prepare_call_release_gil(self, op): - save_all_regs = False errno_box = op.getarg(0) assert isinstance(errno_box, ConstInt) args = [None, l.imm(errno_box.value)] for i in range(1,op.numargs()): args.append(self.loc(op.getarg(i))) - self._spill_before_call(save_all_regs) + self._spill_before_call(save_all_regs=True) if op.type != VOID: resloc = self.after_call(op) args[0] = resloc From pypy.commits at gmail.com Thu Jan 7 04:48:27 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 07 Jan 2016 01:48:27 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added zrpy_gc test file, Message-ID: <568e346b.520e1c0a.322f7.2e65@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81604:b4ea2e45c0e9 Date: 2016-01-07 10:47 +0100 http://bitbucket.org/pypy/pypy/changeset/b4ea2e45c0e9/ Log: added zrpy_gc test file, added _build_malloc_slow_path function diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -91,6 +91,8 @@ assert nos == [0, 1, 47] elif self.cpu.backend_name.startswith('ppc64'): assert nos == [0, 1, 33] + elif self.cpu.backend_name.startswith('zarch'): + assert nos == [0, 1, 35] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -425,6 +425,86 @@ self.mc = None return mc.materialize(self.cpu, []) + def _build_malloc_slowpath(self, kind): + """ While arriving on slowpath, we have a gcmap in SCRATCH. + The arguments are passed in r.RES and r.RSZ, as follows: + + kind == 'fixed': nursery_head in r.RES and the size in r.RSZ - r.RES. + + kind == 'str/unicode': length of the string to allocate in r.RES. + + kind == 'var': itemsize in r.RES, length to allocate in r.RSZ, + and tid in r.SCRATCH2. + + This function must preserve all registers apart from r.RES and r.RSZ. + On return, SCRATCH must contain the address of nursery_free. + """ + assert kind in ['fixed', 'str', 'unicode', 'var'] + mc = InstrBuilder() + self.mc = mc + ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') + mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) + saved_regs = [reg for reg in r.MANAGED_REGS + if reg is not r.RES and reg is not r.RSZ] + self._push_core_regs_to_jitframe(mc, saved_regs) + self._push_fp_regs_to_jitframe(mc) + # + if kind == 'fixed': + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() + elif kind == 'str': + addr = self.cpu.gc_ll_descr.get_malloc_fn_addr('malloc_str') + elif kind == 'unicode': + addr = self.cpu.gc_ll_descr.get_malloc_fn_addr('malloc_unicode') + else: + addr = self.cpu.gc_ll_descr.get_malloc_slowpath_array_addr() + + if kind == 'fixed': + # compute the size we want + mc.LGR(r.r3, r.RES) + mc.SGR(r.r3, r.RSZ) + if hasattr(self.cpu.gc_ll_descr, 'passes_frame'): + # for tests only + mc.LGR(r.r4, r.SPP) + elif kind == 'str' or kind == 'unicode': + pass # length is already in r3 + else: + # arguments to the called function are [itemsize, tid, length] + # itemsize is already in r3 + mc.LGR(r.r5, r.RSZ) # length + mc.LGR(r.r4, r.SCRATCH2) # tid + + # Do the call + addr = rffi.cast(lltype.Signed, addr) + mc.load_imm(mc.RAW_CALL_REG, addr) + mc.push_std_frame() + mc.store_link() + mc.raw_call() + mc.restore_link + mc.pop_std_frame() + + self._reload_frame_if_necessary(mc) + + # Check that we don't get NULL; if we do, we always interrupt the + # current loop, as a "good enough" approximation (same as + # emit_call_malloc_gc()). + self.propagate_memoryerror_if_r2_is_null() + + self._pop_core_regs_from_jitframe(mc, saved_regs) + self._pop_fp_regs_from_jitframe(mc) + + nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr() + self.mc.load_imm(r.SCRATCH, nursery_free_adr) + + # r.SCRATCH is now the address of nursery_free + # r.RES is still the result of the call done above + # r.RSZ is loaded from [SCRATCH], to make the caller's store a no-op here + mc.load(r.RSZ, r.SCRATCH, 0) + # + mc.BCR(c.ANY, r.r14) + self.mc = None + return mc.materialize(self.cpu, []) + + def _build_stack_check_slowpath(self): _, _, slowpathaddr = self.cpu.insert_stack_check() if slowpathaddr == 0 or not self.cpu.propagate_exception_descr: diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -16,6 +16,8 @@ SCRATCH = r1 SCRATCH2 = r0 GPR_RETURN = r2 +RES = r2 +RSZ = r3 [f0,f1,f2,f3,f4,f5,f6,f7,f8, f9,f10,f11,f12,f13,f14,f15] = fpregisters diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py --- a/rpython/jit/backend/zarch/runner.py +++ b/rpython/jit/backend/zarch/runner.py @@ -16,6 +16,8 @@ supports_floats = True from rpython.jit.backend.zarch.registers import JITFRAME_FIXED_SIZE + backend_name = 'zarch' + IS_64_BIT = True frame_reg = r.SP diff --git a/rpython/jit/backend/zarch/test/test_zrpy_gc.py b/rpython/jit/backend/zarch/test/test_zrpy_gc.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_zrpy_gc.py @@ -0,0 +1,6 @@ +from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests + + +class TestShadowStack(CompileFrameworkTests): + gcrootfinder = "shadowstack" + gc = "incminimark" From pypy.commits at gmail.com Thu Jan 7 05:26:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 07 Jan 2016 02:26:09 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: malloc_cond ported to s390x Message-ID: <568e3d41.6918c20a.deb73.3282@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81605:e16953ea7521 Date: 2016-01-07 11:11 +0100 http://bitbucket.org/pypy/pypy/changeset/e16953ea7521/ Log: malloc_cond ported to s390x diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -942,7 +942,7 @@ return start def _reload_frame_if_necessary(self, mc, shadowstack_reg=None): - # might trash the VOLATILE registers different from r3 and f1 + # might trash the VOLATILE registers different from r2 and f0 gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack: @@ -1013,18 +1013,6 @@ self.mc.LMG(r.r6, r.r15, l.addr(6*WORD, r.SP)) self.jmpto(r.r14) - def _push_all_regs_to_stack(self, mc, withfloats, callee_only=False): - # not used!! - # TODO remove if not needed - base_ofs = 2*WORD - if callee_only: - regs = ZARCHRegisterManager.save_around_call_regs - else: - regs = r.registers[2:] - mc.STMG(regs[0], regs[1], l.addr(base_ofs, r.SP)) - if withfloats: - xxx - def _push_all_regs_to_frame(self, mc, ignored_regs, withfloats, callee_only=False): # Push all general purpose registers base_ofs = self.cpu.get_baseofs_of_frame_field() @@ -1193,6 +1181,56 @@ ptr = rffi.cast(lltype.Signed, gcmap) mc.load_imm(reg, ptr) + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): + assert size & (WORD-1) == 0 # must be correctly aligned + + # We load into RES the address stored at nursery_free_adr. We + # calculate the new value for nursery_free_adr and store it in + # RSZ. Then we load the address stored in nursery_top_adr + # into SCRATCH. In the rare case where the value in RSZ is + # (unsigned) bigger than the one in SCRATCH we call + # malloc_slowpath. In the common case where malloc_slowpath + # is not called, we must still write RSZ back into + # nursery_free_adr (r1); so we do it always, even if we called + # malloc_slowpath. + + diff = nursery_top_adr - nursery_free_adr + assert check_imm_value(diff) + mc = self.mc + mc.load_imm(r.r1, nursery_free_adr) + + mc.load(r.RES, r.r1, 0) # load nursery_free + mc.load(r.SCRATCH2, r.r1, diff) # load nursery_top + + mc.LGR(r.RSZ, r.RES) + if check_imm_value(size): + mc.AGHI(r.RSZ, l.imm(size)) + else: + mc.load_imm(r.SCRATCH, l.imm(size)) + mc.AGR(r.RSZ, r.SCRATCH) + + mc.cmp_op(r.RSZ, r.SCRATCH2, signed=False) + + fast_jmp_pos = mc.currpos() + mc.reserve_cond_jump() # conditional jump, patched later + + + # new value of nursery_free_adr in RSZ and the adr of the new object + # in RES. + self.load_gcmap(mc, r.SCRATCH, gcmap) + # We are jumping to malloc_slowpath without a call through a function + # descriptor, because it is an internal call and "call" would trash + # r2 and r11 + mc.branch_absolute(self.malloc_slowpath) + + offset = mc.currpos() - fast_jmp_pos + pmc = OverwritingBuilder(mc, fast_jmp_pos, 1) + pmc.BRCL(c.LE, l.imm(offset)) # jump if LE (not GT), predicted to be true + pmc.overwrite() + + mc.STG(r.RSZ, l.addr(0, r.r1)) # store into nursery_free + + def malloc_cond_varsize_frame(self, nursery_free_adr, nursery_top_adr, sizeloc, gcmap): xxx diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -198,6 +198,14 @@ """ self.BASR(r.RETURN, call_reg) + def reserve_cond_jump(self): + self.trap() # conditional jump, patched later + self.write('\x00'*4) + + def branch_absolute(self, addr): + self.load_imm(r.r14, addr) + self.BASR(r.r14, r.r14) + def store_link(self): self.STG(r.RETURN, l.addr(14*WORD, r.SP)) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -7,6 +7,7 @@ gen_emit_imm_pool_rr) from rpython.jit.backend.zarch.helper.regalloc import (check_imm, check_imm_value) +from rpython.jit.metainterp.history import (ConstInt) from rpython.jit.backend.zarch.codebuilder import ZARCHGuardToken, InstrBuilder from rpython.jit.backend.llsupport import symbolic, jitframe import rpython.jit.backend.zarch.conditions as c diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -748,7 +748,6 @@ return self._prepare_call_default(op) def prepare_call_malloc_nursery(self, op): - xxx self.rm.force_allocate_reg(op, selected_reg=r.RES) self.rm.temp_boxes.append(op) tmp_box = TempInt() diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -17,7 +17,7 @@ SCRATCH2 = r0 GPR_RETURN = r2 RES = r2 -RSZ = r3 +RSZ = r6 [f0,f1,f2,f3,f4,f5,f6,f7,f8, f9,f10,f11,f12,f13,f14,f15] = fpregisters From pypy.commits at gmail.com Thu Jan 7 05:26:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 07 Jan 2016 02:26:11 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: malloc_cond_varsize_frame ported to s390x Message-ID: <568e3d43.022f1c0a.d0118.3ca0@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81606:2c7ffb3a804c Date: 2016-01-07 11:25 +0100 http://bitbucket.org/pypy/pypy/changeset/2c7ffb3a804c/ Log: malloc_cond_varsize_frame ported to s390x diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1212,7 +1212,7 @@ mc.cmp_op(r.RSZ, r.SCRATCH2, signed=False) fast_jmp_pos = mc.currpos() - mc.reserve_cond_jump() # conditional jump, patched later + mc.reserve_cond_jump(short=True) # conditional jump, patched later # new value of nursery_free_adr in RSZ and the adr of the new object @@ -1225,7 +1225,7 @@ offset = mc.currpos() - fast_jmp_pos pmc = OverwritingBuilder(mc, fast_jmp_pos, 1) - pmc.BRCL(c.LE, l.imm(offset)) # jump if LE (not GT), predicted to be true + pmc.BRC(c.LE, l.imm(offset)) # jump if LE (not GT), predicted to be true pmc.overwrite() mc.STG(r.RSZ, l.addr(0, r.r1)) # store into nursery_free @@ -1233,37 +1233,38 @@ def malloc_cond_varsize_frame(self, nursery_free_adr, nursery_top_adr, sizeloc, gcmap): - xxx diff = nursery_top_adr - nursery_free_adr - assert _check_imm_arg(diff) + assert check_imm_value(diff) mc = self.mc - mc.load_imm(r.r2, nursery_free_adr) + mc.load_imm(r.r1, nursery_free_adr) if sizeloc is r.RES: - mc.mr(r.RSZ.value, r.RES.value) + mc.LGR(r.RSZ, r.RES) sizeloc = r.RSZ - mc.load(r.RES.value, r.r2.value, 0) # load nursery_free - mc.load(r.SCRATCH.value, r.r2.value, diff) # load nursery_top + mc.load(r.RES, l.addr(0, r.r1)) # load nursery_free + mc.load(r.SCRATCH2, l.addr(diff, r.r1)) # load nursery_top - mc.add(r.RSZ.value, r.RES.value, sizeloc.value) + mc.LGR(r.SCRATCH, r.RES) + mc.AGR(r.SCRATCH, sizeloc) # sizeloc can be RSZ + mc.LGR(r.RSZ, SCRATCH) - mc.cmp_op(0, r.RSZ.value, r.SCRATCH.value, signed=False) + mc.cmp_op(r.RSZ, r.SCRATCH2, signed=False) fast_jmp_pos = mc.currpos() - mc.trap() # conditional jump, patched later + mc.reserve_cond_jump(short=True) # conditional jump, patched later # new value of nursery_free_adr in RSZ and the adr of the new object # in RES. - self.load_gcmap(mc, r.r2, gcmap) - mc.bl_abs(self.malloc_slowpath) + self.load_gcmap(mc, r.r1, gcmap) + mc.branch_absolute(self.malloc_slowpath) offset = mc.currpos() - fast_jmp_pos pmc = OverwritingBuilder(mc, fast_jmp_pos, 1) - pmc.bc(7, 1, offset) # jump if LE (not GT), predicted to be true + pmc.BRC(l.LE, l.imm(offset)) # jump if LE (not GT), predicted to be true pmc.overwrite() - mc.store(r.RSZ.value, r.r2.value, 0) # store into nursery_free + mc.STG(r.RSZ, l.addr(0, r.r1)) # store into nursery_free def malloc_cond_varsize(self, kind, nursery_free_adr, nursery_top_adr, lengthloc, itemsize, maxlength, gcmap, @@ -1382,7 +1383,6 @@ pmc.b(offset) # jump always pmc.overwrite() - def notimplemented_op(asm, op, arglocs, regalloc): print "[ZARCH/asm] %s not implemented" % op.getopname() raise NotImplementedError(op) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -198,9 +198,11 @@ """ self.BASR(r.RETURN, call_reg) - def reserve_cond_jump(self): + def reserve_cond_jump(self, short=False): self.trap() # conditional jump, patched later - self.write('\x00'*4) + self.trap() # conditional jump, patched later + if not short: + self.trap() # conditional jump, patched later def branch_absolute(self, addr): self.load_imm(r.r14, addr) From pypy.commits at gmail.com Thu Jan 7 06:14:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 07 Jan 2016 03:14:00 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: scaling the length loc in rewrite for malloc_cond_varsize Message-ID: <568e4878.0c2e1c0a.999da.4733@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81607:b3c83f9341fe Date: 2016-01-07 12:12 +0100 http://bitbucket.org/pypy/pypy/changeset/b3c83f9341fe/ Log: scaling the length loc in rewrite for malloc_cond_varsize diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -785,6 +785,10 @@ arraydescr.lendescr.offset != gc_descr.standard_array_length_ofs)): return False self.emitting_an_operation_that_can_collect() + scale = itemsize + if scale not in self.cpu.load_supported_factors: + scale, offset, v_length = \ + self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) op = ResOperation(rop.CALL_MALLOC_NURSERY_VARSIZE, [ConstInt(kind), ConstInt(itemsize), v_length], descr=arraydescr) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1502,32 +1502,6 @@ genop_gc_load_indexed_r = _genop_gc_load_indexed genop_gc_load_indexed_f = _genop_gc_load_indexed - def _imul_const_scaled(self, mc, targetreg, sourcereg, itemsize): - """Produce one operation to do roughly - targetreg = sourcereg * itemsize - except that the targetreg may still need shifting by 0,1,2,3. - """ - if (itemsize & 7) == 0: - shift = 3 - elif (itemsize & 3) == 0: - shift = 2 - elif (itemsize & 1) == 0: - shift = 1 - else: - shift = 0 - itemsize >>= shift - # - if valid_addressing_size(itemsize - 1): - mc.LEA_ra(targetreg, (sourcereg, sourcereg, - get_scale(itemsize - 1), 0)) - elif valid_addressing_size(itemsize): - mc.LEA_ra(targetreg, (rx86.NO_BASE_REGISTER, sourcereg, - get_scale(itemsize), 0)) - else: - mc.IMUL_rri(targetreg, sourcereg, itemsize) - # - return shift - def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. This should # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a @@ -2354,12 +2328,8 @@ jmp_adr0 = self.mc.get_relative_pos() self.mc.MOV(eax, heap(nursery_free_adr)) - if valid_addressing_size(itemsize): - shift = get_scale(itemsize) - else: - shift = self._imul_const_scaled(self.mc, edi.value, - varsizeloc.value, itemsize) - varsizeloc = edi + assert valid_addressing_size(itemsize): + shift = get_scale(itemsize) # now varsizeloc is a register != eax. The size of # the variable part of the array is (varsizeloc << shift) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1010,7 +1010,7 @@ self.rm.possibly_free_var(length_box) # itemsize = op.getarg(1).getint() - maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) / itemsize + maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) self.assembler.malloc_cond_varsize( op.getarg(0).getint(), gc_ll_descr.get_nursery_free_addr(), From pypy.commits at gmail.com Thu Jan 7 06:15:29 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 07 Jan 2016 03:15:29 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: merged default Message-ID: <568e48d1.8205c20a.c5c42.059e@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81608:6c7db6a22424 Date: 2016-01-07 12:14 +0100 http://bitbucket.org/pypy/pypy/changeset/6c7db6a22424/ Log: merged default diff too long, truncating to 2000 out of 2916 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': @@ -559,6 +563,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +597,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,8 @@ .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 +Fixed ``_PyLong_FromByteArray()``, which was buggy. + .. branch: numpy-1.10 Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy @@ -101,3 +103,10 @@ Fix the cryptic exception message when attempting to use extended slicing in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -89,6 +89,7 @@ 'set_code_callback' : 'interp_magic.set_code_callback', 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', + 'decode_long' : 'interp_magic.decode_long', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pycode import CodeHookCache from pypy.interpreter.pyframe import PyFrame @@ -158,4 +158,13 @@ if space.is_none(w_callable): cache._code_hook = None else: - cache._code_hook = w_callable \ No newline at end of file + cache._code_hook = w_callable + + at unwrap_spec(string=str, byteorder=str, signed=int) +def decode_long(space, string, byteorder='little', signed=1): + from rpython.rlib.rbigint import rbigint, InvalidEndiannessError + try: + result = rbigint.frombytes(string, byteorder, bool(signed)) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, "invalid byteorder argument") + return space.newlong_from_rbigint(result) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -30,4 +30,20 @@ """ in d finally: __pypy__.set_code_callback(None) - assert d['f'].__code__ in l \ No newline at end of file + assert d['f'].__code__ in l + + def test_decode_long(self): + from __pypy__ import decode_long + assert decode_long('') == 0 + assert decode_long('\xff\x00') == 255 + assert decode_long('\xff\x7f') == 32767 + assert decode_long('\x00\xff') == -256 + assert decode_long('\x00\x80') == -32768 + assert decode_long('\x80') == -128 + assert decode_long('\x7f') == 127 + assert decode_long('\x55' * 97) == (1 << (97 * 8)) // 3 + assert decode_long('\x00\x80', 'big') == 128 + assert decode_long('\xff\x7f', 'little', False) == 32767 + assert decode_long('\x00\x80', 'little', False) == 32768 + assert decode_long('\x00\x80', 'little', True) == -32768 + raises(ValueError, decode_long, '', 'foo') diff --git a/pypy/module/_cffi_backend/cglob.py b/pypy/module/_cffi_backend/cglob.py --- a/pypy/module/_cffi_backend/cglob.py +++ b/pypy/module/_cffi_backend/cglob.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend import newtype +from rpython.rlib import rgil from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -26,7 +27,9 @@ if not we_are_translated(): FNPTR = rffi.CCallback([], rffi.VOIDP) fetch_addr = rffi.cast(FNPTR, self.fetch_addr) + rgil.release() result = fetch_addr() + rgil.acquire() else: # careful in translated versions: we need to call fetch_addr, # but in a GIL-releasing way. The easiest is to invoke a diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -423,7 +423,9 @@ exchange_offset += rffi.getintfield(self.atypes[i], 'c_size') # store the exchange data size - cif_descr.exchange_size = exchange_offset + # we also align it to the next multiple of 8, in an attempt to + # work around bugs(?) of libffi (see cffi issue #241) + cif_descr.exchange_size = self.align_arg(exchange_offset) def fb_extra_fields(self, cif_descr): cif_descr.abi = self.fabi diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -228,26 +228,11 @@ def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - - result = rbigint() - negative = False - - for i in range(0, n): - if little_endian: - c = intmask(bytes[i]) - else: - c = intmask(bytes[n - i - 1]) - if i == 0 and signed and c & 0x80: - negative = True - if negative: - c = c ^ 0xFF - digit = rbigint.fromint(c) - - result = result.lshift(8) - result = result.add(digit) - - if negative: - result = result.neg() - + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, bytes), + rffi.cast(lltype.Signed, n)) + if little_endian: + byteorder = 'little' + else: + byteorder = 'big' + result = rbigint.frombytes(s, byteorder, signed != 0) return space.newlong_from_rbigint(result) - diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,8 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - CANNOT_FAIL) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -387,7 +386,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - error=CANNOT_FAIL, external=True) + external=True) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -175,10 +175,26 @@ little_endian, is_signed); """), ]) - assert module.from_bytearray(True, False) == 0x9ABC - assert module.from_bytearray(True, True) == -0x6543 - assert module.from_bytearray(False, False) == 0xBC9A - assert module.from_bytearray(False, True) == -0x4365 + assert module.from_bytearray(True, False) == 0xBC9A + assert module.from_bytearray(True, True) == -0x4366 + assert module.from_bytearray(False, False) == 0x9ABC + assert module.from_bytearray(False, True) == -0x6544 + + def test_frombytearray_2(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x41BC9A + assert module.from_bytearray(True, True) == 0x41BC9A + assert module.from_bytearray(False, False) == 0x9ABC41 + assert module.from_bytearray(False, True) == -0x6543BF def test_fromunicode(self): module = self.import_extension('foo', [ diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -414,15 +414,26 @@ return NULL; } PyObject *name = PyString_FromString("attr1"); - PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); - if (attr1->ob_ival != value->ob_ival) + PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, "tp_getattro returned wrong value"); return NULL; } Py_DECREF(name); - Py_DECREF(attr1); + Py_DECREF(attr); + name = PyString_FromString("attr2"); + attr = obj->ob_type->tp_getattro(obj, name); + if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) + { + PyErr_Clear(); + } else { + PyErr_SetString(PyExc_ValueError, + "tp_getattro should have raised"); + return NULL; + } + Py_DECREF(name); Py_RETURN_TRUE; ''' ) @@ -637,7 +648,7 @@ IntLikeObject *intObj; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; @@ -657,7 +668,7 @@ IntLikeObjectNoOp *intObjNoOp; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -299,7 +299,7 @@ return build_stat_result(space, st) def lstat(space, w_path): - "Like stat(path), but do no follow symbolic links." + "Like stat(path), but do not follow symbolic links." try: st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -45,7 +45,7 @@ # the newstr and the strsetitems are because the string is forced, # which is in turn because the optimizer doesn't know how to handle a - # getarrayitem_gc_i on a virtual string. It could be improved, but it + # gc_load_indexed_i on a virtual string. It could be improved, but it # is also true that in real life cases struct.unpack is called on # strings which come from the outside, so it's a minor issue. assert loop.match_by_id("unpack", """ @@ -55,17 +55,17 @@ strsetitem(p88, 1, i14) strsetitem(p88, 2, i17) strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + i91 = gc_load_indexed_i(p88, 0, 1, _, -4) """) def test_struct_object(self): def main(n): import struct - s = struct.Struct("i") + s = struct.Struct("ii") i = 1 while i < n: - buf = s.pack(i) # ID: pack - x = s.unpack(buf)[0] # ID: unpack + buf = s.pack(-1, i) # ID: pack + x = s.unpack(buf)[1] # ID: unpack i += x / i return i @@ -88,10 +88,15 @@ assert loop.match_by_id('unpack', """ # struct.unpack - p88 = newstr(4) - strsetitem(p88, 0, i11) - strsetitem(p88, 1, i14) - strsetitem(p88, 2, i17) - strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + p88 = newstr(8) + strsetitem(p88, 0, 255) + strsetitem(p88, 1, 255) + strsetitem(p88, 2, 255) + strsetitem(p88, 3, 255) + strsetitem(p88, 4, i11) + strsetitem(p88, 5, i14) + strsetitem(p88, 6, i17) + strsetitem(p88, 7, i20) + i90 = gc_load_indexed_i(p88, 0, 1, _, -4) + i91 = gc_load_indexed_i(p88, 4, 1, _, -4) """) diff --git a/pypy/module/test_lib_pypy/test_cPickle.py b/pypy/module/test_lib_pypy/test_cPickle.py --- a/pypy/module/test_lib_pypy/test_cPickle.py +++ b/pypy/module/test_lib_pypy/test_cPickle.py @@ -5,3 +5,7 @@ def test_stack_underflow(): py.test.raises(cPickle.UnpicklingError, cPickle.loads, "a string") + +def test_bad_key(): + e = py.test.raises(cPickle.UnpicklingError, cPickle.loads, "v") + assert str(e.value) == "invalid load key, 'v'." diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -27,7 +27,7 @@ from pypy.module.thread import gil MixedModule.__init__(self, space, *args) prev_ec = space.threadlocals.get_ec() - space.threadlocals = gil.GILThreadLocals() + space.threadlocals = gil.GILThreadLocals(space) space.threadlocals.initialize(space) if prev_ec is not None: space.threadlocals._set_ec(prev_ec) diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -65,7 +65,7 @@ except Exception, e: assert 0 thread.gc_thread_die() - my_gil_threadlocals = gil.GILThreadLocals() + my_gil_threadlocals = gil.GILThreadLocals(space) def f(): state.data = [] state.datalen1 = 0 diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,5 +1,7 @@ -from rpython.rlib import rthread +import weakref +from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,15 +15,51 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - def __init__(self): + def __init__(self, space): "NOT_RPYTHON" - self._valuedict = {} # {thread_ident: ExecutionContext()} + # + # This object tracks code that enters and leaves threads. + # There are two APIs. For Python-level threads, we know when + # the thread starts and ends, and we call enter_thread() and + # leave_thread(). In a few other cases, like callbacks, we + # might be running in some never-seen-before thread: in this + # case, the callback logic needs to call try_enter_thread() at + # the start, and if this returns True it needs to call + # leave_thread() at the end. + # + # We implement an optimization for the second case (which only + # works if we translate with a framework GC and with + # rweakref). If try_enter_thread() is called in a + # never-seen-before thread, it still returns False and + # remembers the ExecutionContext with 'self._weaklist'. The + # next time we call try_enter_thread() again in the same + # thread, the ExecutionContext is reused. The optimization is + # not completely invisible to the user: 'thread._local()' + # values will remain. We can argue that it is the correct + # behavior to do that, and the behavior we get if the + # optimization is disabled is buggy (but hard to do better + # then). + # + # 'self._valuedict' is a dict mapping the thread idents to + # ExecutionContexts; it does not list the ExecutionContexts + # which are in 'self._weaklist'. (The latter is more precisely + # a list of AutoFreeECWrapper objects, defined below, which + # each references the ExecutionContext.) + # + self.space = space + self._valuedict = {} self._cleanup_() self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, loop_invariant=True) + def can_optimize_with_weaklist(self): + config = self.space.config + return (config.translation.rweakref and + rthread.ThreadLocalReference.automatic_keepalive(config)) + def _cleanup_(self): self._valuedict.clear() + self._weaklist = None self._mainthreadident = 0 def enter_thread(self, space): @@ -29,19 +67,35 @@ self._set_ec(space.createexecutioncontext()) def try_enter_thread(self, space): - if rthread.get_ident() in self._valuedict: + # common case: the thread-local has already got a value + if self.raw_thread_local.get() is not None: return False - self.enter_thread(space) - return True - def _set_ec(self, ec): + # Else, make and attach a new ExecutionContext + ec = space.createexecutioncontext() + if not self.can_optimize_with_weaklist(): + self._set_ec(ec) + return True + + # If can_optimize_with_weaklist(), then 'rthread' keeps the + # thread-local values alive until the end of the thread. Use + # AutoFreeECWrapper as an object with a __del__; when this + # __del__ is called, it means the thread was really finished. + # In this case we don't want leave_thread() to be called + # explicitly, so we return False. + if self._weaklist is None: + self._weaklist = ListECWrappers() + self._weaklist.append(weakref.ref(AutoFreeECWrapper(ec))) + self._set_ec(ec, register_in_valuedict=False) + return False + + def _set_ec(self, ec, register_in_valuedict=True): ident = rthread.get_ident() if self._mainthreadident == 0 or self._mainthreadident == ident: ec._signals_enabled = 1 # the main thread is enabled self._mainthreadident = ident - self._valuedict[ident] = ec - # This logic relies on hacks and _make_sure_does_not_move(). - # It only works because we keep the 'ec' alive in '_valuedict' too. + if register_in_valuedict: + self._valuedict[ident] = ec self.raw_thread_local.set(ec) def leave_thread(self, space): @@ -84,7 +138,23 @@ ec._signals_enabled = new def getallvalues(self): - return self._valuedict + if self._weaklist is None: + return self._valuedict + # This logic walks the 'self._weaklist' list and adds the + # ExecutionContexts to 'result'. We are careful in case there + # are two AutoFreeECWrappers in the list which have the same + # 'ident'; in this case we must keep the most recent one (the + # older one should be deleted soon). Moreover, entries in + # self._valuedict have priority because they are never + # outdated. + result = {} + for h in self._weaklist.items(): + wrapper = h() + if wrapper is not None and not wrapper.deleted: + result[wrapper.ident] = wrapper.ec + # ^^ this possibly overwrites an older ec + result.update(self._valuedict) + return result def reinit_threads(self, space): "Called in the child process after a fork()" @@ -94,7 +164,31 @@ old_sig = ec._signals_enabled if ident != self._mainthreadident: old_sig += 1 - self._cleanup_() + self._cleanup_() # clears self._valuedict self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig + + +class AutoFreeECWrapper(object): + deleted = False + + def __init__(self, ec): + # this makes a loop between 'self' and 'ec'. It should not prevent + # the __del__ method here from being called. + self.ec = ec + ec._threadlocals_auto_free = self + self.ident = rthread.get_ident() + + def __del__(self): + from pypy.module.thread.os_local import thread_is_stopping + # this is always called in another thread: the thread + # referenced by 'self.ec' has finished at that point, and + # we're just after the GC which finds no more references to + # 'ec' (and thus to 'self'). + self.deleted = True + thread_is_stopping(self.ec) + +class ListECWrappers(rshrinklist.AbstractShrinkList): + def must_keep(self, wref): + return wref() is not None diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -358,3 +358,10 @@ assert 3L.__coerce__(4L) == (3L, 4L) assert 3L.__coerce__(4) == (3, 4) assert 3L.__coerce__(object()) == NotImplemented + + def test_linear_long_base_16(self): + # never finishes if long(_, 16) is not linear-time + size = 100000 + n = "a" * size + expected = (2 << (size * 4)) // 3 + assert long(n, 16) == expected diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -100,6 +100,7 @@ self.argtypes = argtypes def __call__(self, funcdesc, inputcells): + from rpython.rlib.objectmodel import NOT_CONSTANT from rpython.rtyper.lltypesystem import lltype args_s = [] from rpython.annotator import model as annmodel @@ -115,6 +116,9 @@ args_s.append(s_input) elif argtype is None: args_s.append(inputcells[i]) # no change + elif argtype is NOT_CONSTANT: + from rpython.annotator.model import not_const + args_s.append(not_const(inputcells[i])) else: args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper)) if len(inputcells) != len(args_s): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper import rclass from rpython.rlib.clibffi import FFI_DEFAULT_ABI @@ -638,18 +639,9 @@ return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): + a = support.cast_arg(lltype.Ptr(descr.A), a) + array = a._obj assert index >= 0 - if descr.A is descr.OUTERA: - a = support.cast_arg(lltype.Ptr(descr.A), a) - else: - # we use rffi.cast instead of support.cast_arg because the types - # might not be "compatible" enough from the lltype point of - # view. In particular, this happens when we use - # str_storage_getitem, in which an rpy_string is casted to - # rpy_string_as_Signed (or similar) - a = rffi.cast(lltype.Ptr(descr.OUTERA), a) - a = getattr(a, descr.OUTERA._arrayfld) - array = a._obj return support.cast_result(descr.A.OF, array.getitem(index)) bh_getarrayitem_gc_pure_i = bh_getarrayitem_gc @@ -714,6 +706,25 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def bh_gc_load_indexed_i(self, struct, index, scale, base_ofs, bytes): + if bytes == 1: T = rffi.UCHAR + elif bytes == 2: T = rffi.USHORT + elif bytes == 4: T = rffi.UINT + elif bytes == 8: T = rffi.ULONGLONG + elif bytes == -1: T = rffi.SIGNEDCHAR + elif bytes == -2: T = rffi.SHORT + elif bytes == -4: T = rffi.INT + elif bytes == -8: T = rffi.LONGLONG + else: raise NotImplementedError(bytes) + x = llop.gc_load_indexed(T, struct, index, scale, base_ofs) + return lltype.cast_primitive(lltype.Signed, x) + + def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): + if bytes != 8: + raise Exception("gc_load_indexed_f is only for 'double'!") + return llop.gc_load_indexed(longlong.FLOATSTORAGE, + struct, index, scale, base_ofs) + def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) p[0] += 1 diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -725,6 +725,16 @@ def bh_raw_load_f(self, addr, offset, descr): return self.read_float_at_mem(addr, offset) + def bh_gc_load_indexed_i(self, addr, index, scale, base_ofs, bytes): + offset = base_ofs + scale * index + return self.read_int_at_mem(addr, offset, abs(bytes), bytes < 0) + + def bh_gc_load_indexed_f(self, addr, index, scale, base_ofs, bytes): + # only for 'double'! + assert bytes == rffi.sizeof(lltype.Float) + offset = base_ofs + scale * index + return self.read_float_at_mem(addr, offset) + def bh_new(self, sizedescr): return self.gc_ll_descr.gc_malloc(sizedescr) diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -20,7 +20,7 @@ PPCBuilder, PPCGuardToken) from rpython.jit.backend.ppc.regalloc import TempPtr, TempInt from rpython.jit.backend.llsupport import symbolic, jitframe -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr, CallDescr +from rpython.jit.backend.llsupport.descr import CallDescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -706,8 +706,10 @@ _mixin_ = True - def _write_to_mem(self, value_loc, base_loc, ofs, size): - if size.value == 8: + def _write_to_mem(self, value_loc, base_loc, ofs, size_loc): + assert size_loc.is_imm() + size = size_loc.value + if size == 8: if value_loc.is_fp_reg(): if ofs.is_imm(): self.mc.stfd(value_loc.value, base_loc.value, ofs.value) @@ -718,17 +720,17 @@ self.mc.std(value_loc.value, base_loc.value, ofs.value) else: self.mc.stdx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if ofs.is_imm(): self.mc.stw(value_loc.value, base_loc.value, ofs.value) else: self.mc.stwx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if ofs.is_imm(): self.mc.sth(value_loc.value, base_loc.value, ofs.value) else: self.mc.sthx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.stb(value_loc.value, base_loc.value, ofs.value) else: @@ -736,18 +738,35 @@ else: assert 0, "size not supported" - def emit_setfield_gc(self, op, arglocs, regalloc): - value_loc, base_loc, ofs, size = arglocs - self._write_to_mem(value_loc, base_loc, ofs, size) + def emit_gc_store(self, op, arglocs, regalloc): + value_loc, base_loc, ofs_loc, size_loc = arglocs + self._write_to_mem(value_loc, base_loc, ofs_loc, size_loc) - emit_setfield_raw = emit_setfield_gc - emit_zero_ptr_field = emit_setfield_gc + def _apply_offset(self, index_loc, ofs_loc): + # If offset != 0 then we have to add it here. Note that + # mc.addi() would not be valid with operand r0. + assert ofs_loc.is_imm() # must be an immediate... + assert _check_imm_arg(ofs_loc.getint()) # ...that fits 16 bits + assert index_loc is not r.SCRATCH2 + # (simplified version of _apply_scale()) + if ofs_loc.value > 0: + self.mc.addi(r.SCRATCH2.value, index_loc.value, ofs_loc.value) + index_loc = r.SCRATCH2 + return index_loc - def _load_from_mem(self, res, base_loc, ofs, size, signed): + def emit_gc_store_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, value_loc, ofs_loc, size_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._write_to_mem(value_loc, base_loc, index_loc, size_loc) + + def _load_from_mem(self, res, base_loc, ofs, size_loc, sign_loc): # res, base_loc, ofs, size and signed are all locations assert base_loc is not r.SCRATCH - sign = signed.value - if size.value == 8: + assert size_loc.is_imm() + size = size_loc.value + assert sign_loc.is_imm() + sign = sign_loc.value + if size == 8: if res.is_fp_reg(): if ofs.is_imm(): self.mc.lfd(res.value, base_loc.value, ofs.value) @@ -758,7 +777,7 @@ self.mc.ld(res.value, base_loc.value, ofs.value) else: self.mc.ldx(res.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if IS_PPC_64 and sign: if ofs.is_imm(): self.mc.lwa(res.value, base_loc.value, ofs.value) @@ -769,7 +788,7 @@ self.mc.lwz(res.value, base_loc.value, ofs.value) else: self.mc.lwzx(res.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if sign: if ofs.is_imm(): self.mc.lha(res.value, base_loc.value, ofs.value) @@ -780,7 +799,7 @@ self.mc.lhz(res.value, base_loc.value, ofs.value) else: self.mc.lhzx(res.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.lbz(res.value, base_loc.value, ofs.value) else: @@ -790,22 +809,28 @@ else: assert 0, "size not supported" - def _genop_getfield(self, op, arglocs, regalloc): - base_loc, ofs, res, size, sign = arglocs - self._load_from_mem(res, base_loc, ofs, size, sign) + def _genop_gc_load(self, op, arglocs, regalloc): + base_loc, ofs_loc, res_loc, size_loc, sign_loc = arglocs + self._load_from_mem(res_loc, base_loc, ofs_loc, size_loc, sign_loc) - emit_getfield_gc_i = _genop_getfield - emit_getfield_gc_r = _genop_getfield - emit_getfield_gc_f = _genop_getfield - emit_getfield_gc_pure_i = _genop_getfield - emit_getfield_gc_pure_r = _genop_getfield - emit_getfield_gc_pure_f = _genop_getfield - emit_getfield_raw_i = _genop_getfield - emit_getfield_raw_f = _genop_getfield + emit_gc_load_i = _genop_gc_load + emit_gc_load_r = _genop_gc_load + emit_gc_load_f = _genop_gc_load + + def _genop_gc_load_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, res_loc, ofs_loc, size_loc, sign_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._load_from_mem(res_loc, base_loc, index_loc, size_loc, sign_loc) + + emit_gc_load_indexed_i = _genop_gc_load_indexed + emit_gc_load_indexed_r = _genop_gc_load_indexed + emit_gc_load_indexed_f = _genop_gc_load_indexed SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment assert loc.is_reg() if multiply_by == 1: return loc @@ -827,6 +852,9 @@ return scratch_loc def _apply_scale(self, ofs, index_loc, itemsize): + # XXX should die now that getarrayitem and getinteriorfield are gone + # but can't because of emit_zero_array() at the moment + # For arrayitem and interiorfield reads and writes: this returns an # offset suitable for use in ld/ldx or similar instructions. # The result will be either the register r2 or a 16-bit immediate. @@ -857,44 +885,6 @@ index_loc = r.SCRATCH2 return index_loc - def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc): - (base_loc, index_loc, res_loc, ofs_loc, - itemsize, fieldsize, fieldsign) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign) - - emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield - - def emit_setinteriorfield_gc(self, op, arglocs, regalloc): - (base_loc, index_loc, value_loc, ofs_loc, - itemsize, fieldsize) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._write_to_mem(value_loc, base_loc, ofs_loc, fieldsize) - - emit_setinteriorfield_raw = emit_setinteriorfield_gc - - def emit_arraylen_gc(self, op, arglocs, regalloc): - res, base_loc, ofs = arglocs - self.mc.load(res.value, base_loc.value, ofs.value) - - emit_setarrayitem_gc = emit_setinteriorfield_gc - emit_setarrayitem_raw = emit_setarrayitem_gc - - emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield - - emit_raw_store = emit_setarrayitem_gc - emit_raw_load_i = _genop_getarray_or_interiorfield - emit_raw_load_f = _genop_getarray_or_interiorfield - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) @@ -998,10 +988,6 @@ _mixin_ = True - emit_strlen = FieldOpAssembler._genop_getfield - emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc - def emit_copystrcontent(self, op, arglocs, regalloc): self._emit_copycontent(arglocs, is_unicode=False) @@ -1059,12 +1045,8 @@ class UnicodeOpAssembler(object): - _mixin_ = True - - emit_unicodelen = FieldOpAssembler._genop_getfield - emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc + # empty! class AllocOpAssembler(object): diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -17,12 +17,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import ArrayDescr +from rpython.jit.backend.llsupport.descr import unpack_arraydescr import rpython.jit.backend.ppc.register as r import rpython.jit.backend.ppc.condition as c -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print @@ -691,159 +688,69 @@ src_locations2, dst_locations2, fptmploc) return [] - def prepare_setfield_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) + def prepare_gc_store(self, op): base_loc = self.ensure_reg(op.getarg(0)) - value_loc = self.ensure_reg(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [value_loc, base_loc, ofs_loc, imm(size)] + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + size_loc = self.ensure_reg_or_any_imm(op.getarg(3)) + return [value_loc, base_loc, ofs_loc, size_loc] - prepare_setfield_raw = prepare_setfield_gc + def _prepare_gc_load(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + self.free_op_vars() + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(2) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, ofs_loc, res_loc, size_loc, imm(sign)] - def _prepare_getfield(self, op): - ofs, size, sign = unpack_fielddescr(op.getdescr()) + prepare_gc_load_i = _prepare_gc_load + prepare_gc_load_r = _prepare_gc_load + prepare_gc_load_f = _prepare_gc_load + + def prepare_gc_store_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + assert op.getarg(3).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(4)) + assert ofs_loc.is_imm() # the arg(4) should always be a small constant + size_loc = self.ensure_reg_or_any_imm(op.getarg(5)) + return [base_loc, index_loc, value_loc, ofs_loc, size_loc] + + def _prepare_gc_load_indexed(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + assert op.getarg(2).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(3)) + assert ofs_loc.is_imm() # the arg(3) should always be a small constant self.free_op_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size), imm(sign)] + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(4) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, index_loc, res_loc, ofs_loc, size_loc, imm(sign)] - prepare_getfield_gc_i = _prepare_getfield - prepare_getfield_gc_r = _prepare_getfield - prepare_getfield_gc_f = _prepare_getfield - prepare_getfield_raw_i = _prepare_getfield - prepare_getfield_raw_f = _prepare_getfield - prepare_getfield_gc_pure_i = _prepare_getfield - prepare_getfield_gc_pure_r = _prepare_getfield - prepare_getfield_gc_pure_f = _prepare_getfield + prepare_gc_load_indexed_i = _prepare_gc_load_indexed + prepare_gc_load_indexed_r = _prepare_gc_load_indexed + prepare_gc_load_indexed_f = _prepare_gc_load_indexed def prepare_increment_debug_counter(self, op): base_loc = self.ensure_reg(op.getarg(0)) temp_loc = r.SCRATCH2 return [base_loc, temp_loc] - def _prepare_getinteriorfield(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(itemsize), imm(fieldsize), imm(sign)] - - prepare_getinteriorfield_gc_i = _prepare_getinteriorfield - prepare_getinteriorfield_gc_r = _prepare_getinteriorfield - prepare_getinteriorfield_gc_f = _prepare_getinteriorfield - - def prepare_setinteriorfield_gc(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, _ = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(itemsize), imm(fieldsize)] - - prepare_setinteriorfield_raw = prepare_setinteriorfield_gc - - def prepare_arraylen_gc(self, op): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - assert _check_imm_arg(ofs) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_setarrayitem_gc(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - imm_size = imm(size) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - - prepare_setarrayitem_raw = prepare_setarrayitem_gc - - def prepare_raw_store(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(1), imm(size)] - - def _prepare_getarrayitem(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(size) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(sign)] - - prepare_getarrayitem_gc_i = _prepare_getarrayitem - prepare_getarrayitem_gc_r = _prepare_getarrayitem - prepare_getarrayitem_gc_f = _prepare_getarrayitem - prepare_getarrayitem_raw_i = _prepare_getarrayitem - prepare_getarrayitem_raw_f = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem - - def _prepare_raw_load(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(1), imm(size), imm(sign)] - - prepare_raw_load_i = _prepare_raw_load - prepare_raw_load_f = _prepare_raw_load - - def prepare_strlen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_strgetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_strsetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0)) dst_ptr_loc = self.ensure_reg(op.getarg(1)) @@ -856,37 +763,6 @@ prepare_copyunicodecontent = prepare_copystrcontent - def prepare_unicodelen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_unicodegetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_unicodesetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - prepare_same_as_i = helper.prepare_unary_op prepare_same_as_r = helper.prepare_unary_op prepare_same_as_f = helper.prepare_unary_op @@ -1078,12 +954,6 @@ arglocs = self._prepare_guard(op) return arglocs - def prepare_zero_ptr_field(self, op): - base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) - value_loc = self.ensure_reg(ConstInt(0)) - return [value_loc, base_loc, ofs_loc, imm(WORD)] - def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -21,6 +21,9 @@ IS_64_BIT = True backend_name = 'ppc64' + # can an ISA instruction handle a factor to the offset? + load_supported_factors = (1,) + from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE frame_reg = r.SP all_reg_indexes = [-1] * 32 diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -4,8 +4,7 @@ import os, sys from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, - unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) +from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, @@ -1086,9 +1085,9 @@ result_loc = self.force_allocate_reg(op) size_box = op.getarg(2) assert isinstance(size_box, ConstInt) - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 @@ -1111,9 +1110,9 @@ assert isinstance(size_box, ConstInt) scale = scale_box.value offset = offset_box.value - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1021,18 +1021,20 @@ kind = getkind(op.result.concretetype)[0] return SpaceOperation('getinteriorfield_gc_%s' % kind, args, op.result) - elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): - # special-case 2: GcStruct with Array field - v_inst, c_field, v_index = op.args - STRUCT = v_inst.concretetype.TO - ARRAY = getattr(STRUCT, c_field.value) - assert isinstance(ARRAY, lltype.Array) - arraydescr = self.cpu.arraydescrof(STRUCT) - kind = getkind(op.result.concretetype)[0] - assert kind in ('i', 'f') - return SpaceOperation('getarrayitem_gc_%s' % kind, - [op.args[0], v_index, arraydescr], - op.result) + #elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): + # # special-case 2: GcStruct with Array field + # ---was added in the faster-rstruct branch,--- + # ---no longer directly supported--- + # v_inst, c_field, v_index = op.args + # STRUCT = v_inst.concretetype.TO + # ARRAY = getattr(STRUCT, c_field.value) + # assert isinstance(ARRAY, lltype.Array) + # arraydescr = self.cpu.arraydescrof(STRUCT) + # kind = getkind(op.result.concretetype)[0] + # assert kind in ('i', 'f') + # return SpaceOperation('getarrayitem_gc_%s' % kind, + # [op.args[0], v_index, arraydescr], + # op.result) else: assert False, 'not supported' @@ -1084,6 +1086,25 @@ return SpaceOperation('raw_load_%s' % kind, [op.args[0], op.args[1], descr], op.result) + def rewrite_op_gc_load_indexed(self, op): + T = op.result.concretetype + kind = getkind(T)[0] + assert kind != 'r' + descr = self.cpu.arraydescrof(rffi.CArray(T)) + if (not isinstance(op.args[2], Constant) or + not isinstance(op.args[3], Constant)): + raise NotImplementedError("gc_load_indexed: 'scale' and 'base_ofs'" + " should be constants") + # xxx hard-code the size in bytes at translation time, which is + # probably fine and avoids lots of issues later + bytes = descr.get_item_size_in_bytes() + if descr.is_item_signed(): + bytes = -bytes + c_bytes = Constant(bytes, lltype.Signed) + return SpaceOperation('gc_load_indexed_%s' % kind, + [op.args[0], op.args[1], + op.args[2], op.args[3], c_bytes], op.result) + def _rewrite_equality(self, op, opname): arg0, arg1 = op.args if isinstance(arg0, Constant) and not arg0.value: diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1434,6 +1434,13 @@ def bhimpl_raw_load_f(cpu, addr, offset, arraydescr): return cpu.bh_raw_load_f(addr, offset, arraydescr) + @arguments("cpu", "r", "i", "i", "i", "i", returns="i") + def bhimpl_gc_load_indexed_i(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_i(addr, index,scale,base_ofs, bytes) + @arguments("cpu", "r", "i", "i", "i", "i", returns="f") + def bhimpl_gc_load_indexed_f(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_f(addr, index,scale,base_ofs, bytes) + @arguments("r", "d", "d") def bhimpl_record_quasiimmut_field(struct, fielddescr, mutatefielddescr): pass diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -399,9 +399,7 @@ rop.GC_LOAD_I, rop.GC_LOAD_R, rop.GC_LOAD_F, - rop.GC_LOAD_INDEXED_I, rop.GC_LOAD_INDEXED_R, - rop.GC_LOAD_INDEXED_F, rop.GC_STORE, rop.GC_STORE_INDEXED, ): # list of opcodes never executed by pyjitpl diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -535,16 +535,10 @@ cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem it might happen that op.getarg(0) is - # a virtual string, NOT an array. In that case, we cannot cache the - # getarrayitem as if it were an array, obviously. In theory we could - # improve by writing special code to interpter the buffer of the - # virtual string as if it were an array, but it looks complicate, - # fragile and not worth it. arrayinfo = self.ensure_ptr_info_arg0(op) indexb = self.getintbound(op.getarg(1)) cf = None - if indexb.is_constant() and not arrayinfo.is_vstring(): + if indexb.is_constant(): index = indexb.getint() arrayinfo.getlenbound(None).make_gt_const(index) # use the cache on (arraydescr, index), which is a constant @@ -561,7 +555,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # the remember the result of reading the array item - if cf is not None and not arrayinfo.is_vstring(): + if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), self.get_box_replacement(op), cf, diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -24,9 +24,6 @@ def is_virtual(self): return False - def is_vstring(self): - return False - def is_precise(self): return False diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -277,10 +277,8 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem we op.getarg(0) is a string, NOT an - # array, hence the check. In that case, it will be forced opinfo = self.getptrinfo(op.getarg(0)) - if opinfo and opinfo.is_virtual() and not opinfo.is_vstring(): + if opinfo and opinfo.is_virtual(): indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: item = opinfo.getitem(op.getdescr(), indexbox.getint()) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -62,9 +62,6 @@ self.mode = mode self.length = length - def is_vstring(self): - return True - def getlenbound(self, mode): from rpython.jit.metainterp.optimizeopt import intutils diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -810,6 +810,27 @@ return self.execute_with_descr(rop.RAW_LOAD_F, arraydescr, addrbox, offsetbox) + def _remove_symbolics(self, c): + if not we_are_translated(): + from rpython.rtyper.lltypesystem import ll2ctypes + assert isinstance(c, ConstInt) + c = ConstInt(ll2ctypes.lltype2ctypes(c.value)) + return c + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_i(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_I, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_f(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_F, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + @arguments("box") def opimpl_hint_force_virtualizable(self, box): self.metainterp.gen_store_back_in_vable(box) diff --git a/rpython/jit/metainterp/test/test_strstorage.py b/rpython/jit/metainterp/test/test_strstorage.py --- a/rpython/jit/metainterp/test/test_strstorage.py +++ b/rpython/jit/metainterp/test/test_strstorage.py @@ -19,7 +19,7 @@ res = self.interp_operations(f, [], supports_singlefloats=True) # kind = getkind(TYPE)[0] # 'i' or 'f' - self.check_operations_history({'getarrayitem_gc_%s' % kind: 1, + self.check_operations_history({'gc_load_indexed_%s' % kind: 1, 'finish': 1}) # if TYPE == lltype.SingleFloat: @@ -29,8 +29,8 @@ return longlong.int2singlefloat(res) return res - def str_storage_supported(self, TYPE): - py.test.skip('this is not a JIT test') + #def str_storage_supported(self, TYPE): + # py.test.skip('this is not a JIT test') def test_force_virtual_str_storage(self): byteorder = sys.byteorder @@ -48,6 +48,6 @@ 'strsetitem': 1, # str forcing 'call_pure_r': 1, # str forcing (copystrcontent) 'guard_no_exception': 1, # str forcing - 'getarrayitem_gc_i': 1, # str_storage_getitem + 'gc_load_indexed_i': 1, # str_storage_getitem 'finish': 1 }) diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -97,6 +97,18 @@ def __init__(self, buffer, offset, size): self.readonly = buffer.readonly + if isinstance(buffer, SubBuffer): # don't nest them + # we want a view (offset, size) over a view + # (buffer.offset, buffer.size) over buffer.buffer. + # Note that either '.size' can be -1 to mean 'up to the end'. + at_most = buffer.getlength() - offset + if size > at_most or size < 0: + if at_most < 0: + at_most = 0 + size = at_most + offset += buffer.offset + buffer = buffer.buffer + # self.buffer = buffer self.offset = offset self.size = size diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -1,4 +1,4 @@ -secondary_entrypoints = {} +secondary_entrypoints = {"main": []} import py from rpython.rtyper.lltypesystem import lltype, rffi @@ -109,20 +109,3 @@ "you. Another difference is that entrypoint_highlevel() " "returns the normal Python function, which can be safely " "called from more Python code.") - - -# the point of dance below is so the call to rpython_startup_code actually -# does call asm_stack_bottom. It's here because there is no other good place. -# This thing is imported by any target which has any API, so it'll get -# registered - -RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True, - random_effects_on_gcobjs=True) - - at entrypoint_lowlevel('main', [], c_name='rpython_startup_code') -def rpython_startup_code(): - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py - RPython_StartupCode() - rffi.stackcounter.stacks_counter -= 1 diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -114,6 +114,8 @@ specialize = _Specialize() +NOT_CONSTANT = object() # to use in enforceargs() + def enforceargs(*types_, **kwds): """ Decorate a function with forcing of RPython-level types on arguments. None means no enforcing. @@ -333,6 +335,25 @@ # XXX this can be made more efficient in the future return bytearray(str(i)) +def fetch_translated_config(): + """Returns the config that is current when translating. + Returns None if not translated. + """ + return None + +class Entry(ExtRegistryEntry): + _about_ = fetch_translated_config + + def compute_result_annotation(self): + config = self.bookkeeper.annotator.translator.config + return self.bookkeeper.immutablevalue(config) + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + translator = hop.rtyper.annotator.translator + hop.exception_cannot_occur() + return hop.inputconst(lltype.Void, translator.config) + # ____________________________________________________________ class FREED_OBJECT(object): diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2794,8 +2794,10 @@ def parse_digit_string(parser): # helper for fromstr + base = parser.base + if (base & (base - 1)) == 0: + return parse_string_from_binary_base(parser) a = rbigint() - base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 while True: @@ -2811,3 +2813,52 @@ tens *= base a.sign *= parser.sign return a + +def parse_string_from_binary_base(parser): + # The point to this routine is that it takes time linear in the number of + # string characters. + from rpython.rlib.rstring import ParseStringError + + base = parser.base + if base == 2: bits_per_char = 1 + elif base == 4: bits_per_char = 2 + elif base == 8: bits_per_char = 3 + elif base == 16: bits_per_char = 4 + elif base == 32: bits_per_char = 5 + else: + raise AssertionError + + # n <- total number of bits needed, while moving 'parser' to the end + n = 0 + while parser.next_digit() >= 0: + n += 1 + + # b <- number of Python digits needed, = ceiling(n/SHIFT). */ + try: + b = ovfcheck(n * bits_per_char) + b = ovfcheck(b + (SHIFT - 1)) + except OverflowError: + raise ParseStringError("long string too large to convert") + b = (b // SHIFT) or 1 + z = rbigint([NULLDIGIT] * b, sign=parser.sign) + + # Read string from right, and fill in long from left; i.e., + # from least to most significant in both. + accum = _widen_digit(0) + bits_in_accum = 0 + pdigit = 0 + for _ in range(n): + k = parser.prev_digit() + accum |= _widen_digit(k) << bits_in_accum + bits_in_accum += bits_per_char + if bits_in_accum >= SHIFT: + z.setdigit(pdigit, accum) + pdigit += 1 + assert pdigit <= b + accum >>= SHIFT + bits_in_accum -= SHIFT + + if bits_in_accum: + z.setdigit(pdigit, accum) + z._normalize() + return z diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -9,7 +9,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.rarithmetic import intmask, widen from rpython.rlib.objectmodel import ( - specialize, enforceargs, register_replacement_for) + specialize, enforceargs, register_replacement_for, NOT_CONSTANT) from rpython.rlib.signature import signature from rpython.rlib import types from rpython.annotator.model import s_Str0 @@ -415,7 +415,7 @@ @replace_os_function('open') @specialize.argtype(0) - at enforceargs(None, int, int, typecheck=False) + at enforceargs(NOT_CONSTANT, int, int, typecheck=False) def open(path, flags, mode): if _prefer_unicode(path): fd = c_wopen(_as_unicode0(path), flags, mode) diff --git a/rpython/rlib/rshrinklist.py b/rpython/rlib/rshrinklist.py --- a/rpython/rlib/rshrinklist.py +++ b/rpython/rlib/rshrinklist.py @@ -6,6 +6,8 @@ The twist is that occasionally append() will throw away the items for which must_keep() returns False. (It does so without changing the order.) + + See also rpython.rlib.rweaklist. """ _mixin_ = True diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -485,6 +485,24 @@ else: return -1 + def prev_digit(self): + # After exhausting all n digits in next_digit(), you can walk them + # again in reverse order by calling prev_digit() exactly n times + i = self.i - 1 + assert i >= 0 + self.i = i + c = self.s[i] + digit = ord(c) + if '0' <= c <= '9': + digit -= ord('0') + elif 'A' <= c <= 'Z': + digit = (digit - ord('A')) + 10 + elif 'a' <= c <= 'z': + digit = (digit - ord('a')) + 10 + else: + raise AssertionError + return digit + # -------------- public API --------------------------------- INIT_SIZE = 100 # XXX tweak diff --git a/rpython/rlib/rstruct/nativefmttable.py b/rpython/rlib/rstruct/nativefmttable.py --- a/rpython/rlib/rstruct/nativefmttable.py +++ b/rpython/rlib/rstruct/nativefmttable.py @@ -11,7 +11,6 @@ from rpython.rlib.rstruct.standardfmttable import native_is_bigendian from rpython.rlib.rstruct.error import StructError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rstruct/standardfmttable.py b/rpython/rlib/rstruct/standardfmttable.py --- a/rpython/rlib/rstruct/standardfmttable.py +++ b/rpython/rlib/rstruct/standardfmttable.py @@ -12,7 +12,7 @@ from rpython.rlib.rstruct import ieee from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported +from rpython.rlib.strstorage import str_storage_getitem from rpython.rlib import rarithmetic from rpython.rtyper.lltypesystem import rffi @@ -185,13 +185,14 @@ data = fmtiter.read(size) fmtiter.appendobj(ieee.unpack_float(data, fmtiter.bigendian)) return - if not str_storage_supported(TYPE): - # this happens e.g. on win32 and ARM32: we cannot read the string - # content as an array of doubles because it's not properly - # aligned. But we can read a longlong and convert to float - assert TYPE == rffi.DOUBLE - assert rffi.sizeof(TYPE) == 8 - return unpack_longlong2float(fmtiter) + ## XXX check if the following code is still needed + ## if not str_storage_supported(TYPE): + ## # this happens e.g. on win32 and ARM32: we cannot read the string + ## # content as an array of doubles because it's not properly + ## # aligned. But we can read a longlong and convert to float + ## assert TYPE == rffi.DOUBLE + ## assert rffi.sizeof(TYPE) == 8 + ## return unpack_longlong2float(fmtiter) try: # fast path val = unpack_fastpath(TYPE)(fmtiter) @@ -246,7 +247,7 @@ @specialize.argtype(0) def unpack_int_fastpath_maybe(fmtiter): - if fmtiter.bigendian != native_is_bigendian or not str_storage_supported(TYPE): + if fmtiter.bigendian != native_is_bigendian or not native_is_ieee754: ## or not str_storage_supported(TYPE): return False try: intvalue = unpack_fastpath(TYPE)(fmtiter) diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -291,8 +291,6 @@ # ____________________________________________________________ # # Thread-locals. -# KEEP THE REFERENCE ALIVE, THE GC DOES NOT FOLLOW THEM SO FAR! -# We use _make_sure_does_not_move() to make sure the pointer will not move. class ThreadLocalField(object): @@ -351,6 +349,11 @@ class ThreadLocalReference(ThreadLocalField): + # A thread-local that points to an object. The object stored in such + # a thread-local is kept alive as long as the thread is not finished + # (but only with our own GCs! it seems not to work with Boehm...) + # (also, on Windows, if you're not making a DLL but an EXE, it will + # leak the objects when a thread finishes; see threadlocal.c.) _COUNT = 1 def __init__(self, Cls, loop_invariant=False): @@ -378,20 +381,41 @@ assert isinstance(value, Cls) or value is None if we_are_translated(): from rpython.rtyper.annlowlevel import cast_instance_to_gcref - from rpython.rlib.rgc import _make_sure_does_not_move - from rpython.rlib.objectmodel import running_on_llinterp gcref = cast_instance_to_gcref(value) - if not running_on_llinterp: - if gcref: - _make_sure_does_not_move(gcref) value = lltype.cast_ptr_to_int(gcref) setraw(value) + rgc.register_custom_trace_hook(TRACETLREF, _lambda_trace_tlref) + rgc.ll_writebarrier(_tracetlref_obj) else: self.local.value = value self.get = get self.set = set + def _trace_tlref(gc, obj, callback, arg): + p = llmemory.NULL + llop.threadlocalref_acquire(lltype.Void) + while True: + p = llop.threadlocalref_enum(llmemory.Address, p) + if not p: + break + gc._trace_callback(callback, arg, p + offset) + llop.threadlocalref_release(lltype.Void) + _lambda_trace_tlref = lambda: _trace_tlref + TRACETLREF = lltype.GcStruct('TRACETLREF') + _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) + + @staticmethod + def automatic_keepalive(config): + """Returns True if translated with a GC that keeps alive + the set() value until the end of the thread. Returns False + if you need to keep it alive yourself (but in that case, you + should also reset it to None before the thread finishes). + """ + return (config.translation.gctransformer == "framework" and + # see translator/c/src/threadlocal.c for the following line + (not _win32 or config.translation.shared)) + tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", loop_invariant=True) @@ -399,7 +423,8 @@ loop_invariant=True) tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") tlfield_alt_errno = ThreadLocalField(rffi.INT, "alt_errno") -if sys.platform == "win32": +_win32 = (sys.platform == "win32") +if _win32: from rpython.rlib import rwin32 tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") tlfield_alt_lasterror = ThreadLocalField(rwin32.DWORD, "alt_lasterror") diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py --- a/rpython/rlib/rweaklist.py +++ b/rpython/rlib/rweaklist.py @@ -5,6 +5,13 @@ class RWeakListMixin(object): + """A mixin base class. A collection that weakly maps indexes to objects. + After an object goes away, its index is marked free and will be reused + by some following add_handle() call. So add_handle() might not append + the object at the end of the list, but can put it anywhere. + + See also rpython.rlib.rshrinklist. + """ _mixin_ = True def initialize(self): diff --git a/rpython/rlib/strstorage.py b/rpython/rlib/strstorage.py --- a/rpython/rlib/strstorage.py +++ b/rpython/rlib/strstorage.py @@ -9,54 +9,31 @@ # rstr.py:copy_string_contents), which has no chance to work during # tracing # -# 2. use llop.raw_load: despite the name, llop.raw_load DOES support reading -# from GC pointers. However: -# -# a. we would like to use a CompositeOffset as the offset (using the -# same logic as in rstr.py:_get_raw_str_buf), but this is not (yet) -# supported before translation: it works only if you pass an actual -# integer -# -# b. raw_load from a GC pointer is not (yet) supported by the -# JIT. There are plans to introduce a gc_load operation: when it -# will be there, we could fix the issue above and actually use it to -# implement str_storage_getitem -# -# 3. the actual solution: cast rpy_string to a GcStruct which has the very +# 2. cast rpy_string to a GcStruct which has the very # same layout, with the only difference that its 'chars' field is no # longer an Array(Char) but e.e. an Array(Signed). Then, we just need to -# read the appropriate index into the array +# read the appropriate index into the array. To support this solution, +# the JIT's optimizer needed a few workarounds. This was removed. +# +# 3. use the newly introduced 'llop.gc_load_indexed'. +# -from rpython.rtyper.lltypesystem import lltype, rffi, llmemory -from rpython.rtyper.lltypesystem.rstr import STR, _get_raw_str_buf + +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.annlowlevel import llstr -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize - at specialize.memo() -def _rpy_string_as_type(TP): - # sanity check that STR is actually what we think it is - assert STR._flds == { - 'hash': lltype.Signed, - 'chars': lltype.Array(lltype.Char, hints={'immutable': True}) - } - STR_AS_TP = lltype.GcStruct('rpy_string_as_%s' % TP, - ('hash', lltype.Signed), - ('chars', lltype.Array(TP, hints={'immutable': True}))) - return STR_AS_TP - - at specialize.arg(0) -def str_storage_supported(TP): - # on some architectures (e.g. win32 and arm32) an array of longlongs needs - # to be aligned at 8 bytes boundaries, so we cannot safely cast from STR - # to STR_AS_TP. In that case, we str_storage_getitem is simply not - # supported - return rffi.sizeof(TP) <= rffi.sizeof(lltype.Signed) @specialize.ll() -def str_storage_getitem(TP, s, index): - assert str_storage_supported(TP) # sanity check - STR_AS_TP = _rpy_string_as_type(TP) +def str_storage_getitem(TP, s, byte_offset): + # WARNING: the 'byte_offset' is, as its name says, measured in bytes; + # however, it should be aligned for TP, otherwise on some platforms this + # code will crash! lls = llstr(s) - str_as_tp = rffi.cast(lltype.Ptr(STR_AS_TP), lls) - index = index / rffi.sizeof(TP) - return str_as_tp.chars[index] + base_ofs = (llmemory.offsetof(STR, 'chars') + + llmemory.itemoffsetof(STR.chars, 0)) + scale_factor = llmemory.sizeof(lltype.Char) + return llop.gc_load_indexed(TP, lls, byte_offset, + scale_factor, base_ofs) diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -45,3 +45,22 @@ ssbuf = SubBuffer(sbuf, 3, 2) assert ssbuf.getslice(0, 2, 1, 2) == 'ld' assert ssbuf.as_str_and_offset_maybe() == ('hello world', 9) + # + ss2buf = SubBuffer(sbuf, 1, -1) + assert ss2buf.as_str() == 'orld' + assert ss2buf.getlength() == 4 + ss3buf = SubBuffer(ss2buf, 1, -1) + assert ss3buf.as_str() == 'rld' + assert ss3buf.getlength() == 3 + # + ss4buf = SubBuffer(buf, 3, 4) + assert ss4buf.as_str() == 'lo w' + ss5buf = SubBuffer(ss4buf, 1, -1) + assert ss5buf.as_str() == 'o w' + assert ss5buf.getlength() == 3 + +def test_repeated_subbuffer(): + buf = StringBuffer('x' * 10000) From pypy.commits at gmail.com Thu Jan 7 07:12:51 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 07 Jan 2016 04:12:51 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: syntax error removed, simplified op.getopnum() to opnum and extracting the field before Message-ID: <568e5643.460f1c0a.8c036.6bfa@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81609:258b49aa5c84 Date: 2016-01-07 13:11 +0100 http://bitbucket.org/pypy/pypy/changeset/258b49aa5c84/ Log: syntax error removed, simplified op.getopnum() to opnum and extracting the field before transforming call_malloc_cond_varsize operations directly passed by the tests diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -203,39 +203,48 @@ def transform_to_gc_load(self, op): NOT_SIGNED = 0 CINT_ZERO = ConstInt(0) - if op.is_getarrayitem() or \ - op.getopnum() in (rop.GETARRAYITEM_RAW_I, - rop.GETARRAYITEM_RAW_F): + opnum = op.getopnum() + if opnum == rop.CALL_MALLOC_NURSERY_VARSIZE: + v_length = op.getarg(2) + scale = op.getarg(1).getint() + if scale not in self.cpu.load_supported_factors: + scale, offset, v_length = \ + self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) + op.setarg(1, ConstInt(scale)) + op.setarg(2, v_length) + elif op.is_getarrayitem() or \ + opnum in (rop.GETARRAYITEM_RAW_I, + rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) - elif op.getopnum() in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): + elif opnum in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): self.handle_setarrayitem(op) - elif op.getopnum() == rop.RAW_STORE: + elif opnum == rop.RAW_STORE: itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) value_box = op.getarg(2) self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, itemsize, 1, ofs) - elif op.getopnum() in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): + elif opnum in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - elif op.getopnum() in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, - rop.GETINTERIORFIELD_GC_F): + elif opnum in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, + rop.GETINTERIORFIELD_GC_F): ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, fieldsize, itemsize, ofs, sign) - elif op.getopnum() in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): + elif opnum in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) value_box = op.getarg(2) self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, fieldsize, itemsize, ofs) - elif op.getopnum() in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, - rop.GETFIELD_GC_PURE_I, rop.GETFIELD_GC_PURE_F, rop.GETFIELD_GC_PURE_R, - rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): + elif opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, + rop.GETFIELD_GC_PURE_I, rop.GETFIELD_GC_PURE_F, rop.GETFIELD_GC_PURE_R, + rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) if op.getopnum() in (rop.GETFIELD_GC_F, rop.GETFIELD_GC_I, rop.GETFIELD_GC_R): @@ -250,45 +259,45 @@ self.emit_op(op) return True self.emit_gc_load_or_indexed(op, ptr_box, ConstInt(0), itemsize, 1, ofs, sign) - elif op.getopnum() in (rop.SETFIELD_GC, rop.SETFIELD_RAW): + elif opnum in (rop.SETFIELD_GC, rop.SETFIELD_RAW): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) value_box = op.getarg(1) self.emit_gc_store_or_indexed(op, ptr_box, ConstInt(0), value_box, itemsize, 1, ofs) - elif op.getopnum() == rop.ARRAYLEN_GC: + elif opnum == rop.ARRAYLEN_GC: descr = op.getdescr() assert isinstance(descr, ArrayDescr) ofs = descr.lendescr.offset self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs, NOT_SIGNED) - elif op.getopnum() == rop.STRLEN: + elif opnum == rop.STRLEN: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) - elif op.getopnum() == rop.UNICODELEN: + elif opnum == rop.UNICODELEN: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) - elif op.getopnum() == rop.STRGETITEM: + elif opnum == rop.STRGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), itemsize, itemsize, basesize, NOT_SIGNED) - elif op.getopnum() == rop.UNICODEGETITEM: + elif opnum == rop.UNICODEGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), itemsize, itemsize, basesize, NOT_SIGNED) - elif op.getopnum() == rop.STRSETITEM: + elif opnum == rop.STRSETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), itemsize, itemsize, basesize) - elif op.getopnum() == rop.UNICODESETITEM: + elif opnum == rop.UNICODESETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), @@ -790,7 +799,7 @@ scale, offset, v_length = \ self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) op = ResOperation(rop.CALL_MALLOC_NURSERY_VARSIZE, - [ConstInt(kind), ConstInt(itemsize), v_length], + [ConstInt(kind), ConstInt(scale), v_length], descr=arraydescr) self.replace_op_with(v_result, op) self.emit_op(op) diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -313,7 +313,7 @@ 'strdescr': arraydescr}) # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.calls == [(8, 15, 10), (5, 15, 3), ('str', 3)] + assert gc_ll_descr.calls == [(8, 15, 10), (1, 15, 15), ('str', 15)] # one fit, one was too large, one was not fitting def test_malloc_slowpath(self): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2328,7 +2328,7 @@ jmp_adr0 = self.mc.get_relative_pos() self.mc.MOV(eax, heap(nursery_free_adr)) - assert valid_addressing_size(itemsize): + assert valid_addressing_size(itemsize) shift = get_scale(itemsize) # now varsizeloc is a register != eax. The size of From pypy.commits at gmail.com Thu Jan 7 08:58:30 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 07 Jan 2016 05:58:30 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: translation fixes for the changes in rewrite.py Message-ID: <568e6f06.83e01c0a.77e8c.ffffd230@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81610:f99df61537a8 Date: 2016-01-07 14:57 +0100 http://bitbucket.org/pypy/pypy/changeset/f99df61537a8/ Log: translation fixes for the changes in rewrite.py diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -536,6 +536,7 @@ # replaced with another constant greater than 0.) #o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], # descr=arraydescr) + assert isinstance(arraydescr, ArrayDescr) scale = arraydescr.itemsize v_length_scaled = v_length if not isinstance(v_length, ConstInt): @@ -664,6 +665,7 @@ for op in self.last_zero_arrays: assert op.getopnum() == rop.ZERO_ARRAY descr = op.getdescr() + assert isinstance(descr, ArrayDescr) scale = descr.itemsize box = op.getarg(0) try: From pypy.commits at gmail.com Thu Jan 7 10:04:44 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:04:44 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Generalize a bit Message-ID: <568e7e8c.6408c20a.7346.48bd@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2534:cfcf3e64a2b0 Date: 2016-01-07 16:04 +0100 http://bitbucket.org/cffi/cffi/changeset/cfcf3e64a2b0/ Log: Generalize a bit diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -39,6 +39,15 @@ path = self.get_path() filename = '%s.c' % name shutil.copy(os.path.join(local_dir, filename), path) + if 'CC' in os.environ: + args = os.environ['CC'].split() + else: + args = ['gcc'] + if 'CFLAGS' in os.environ: + args.extend(os.environ['CFLAGS'].split()) + if 'LDFLAGS' in os.environ: + args.extend(os.environ['LDFLAGS'].split()) + args.extend(['-g', filename, '-o', name, '-L.']) if '__pypy__' in sys.builtin_module_names: # xxx a bit hackish, maybe ffi.compile() should do a better job executable = os.path.abspath(sys.executable) @@ -48,13 +57,14 @@ os.symlink(libpypy_c, os.path.join(path, 'libpypy-c.so')) except OSError: pass - self._run(['gcc', '-g', filename, '-o', name, '-L.'] + - ['%s.pypy-26.so' % modname for modname in modules] + - ['-lpypy-c', '-Wl,-rpath=$ORIGIN/'] + extra) + args.extend(['%s.pypy-26.so' % modname for modname in modules]) + args.append('-lpypy-c') else: - self._run(['gcc', '-g', filename, '-o', name, '-L.'] + - ['%s.so' % modname for modname in modules] + - ['-lpython2.7', '-Wl,-rpath=$ORIGIN/'] + extra) + args.extend(['%s.so' % modname for modname in modules]) + args.append('-lpython2.7') + args.append('-Wl,-rpath=$ORIGIN/') + args.extend(extra) + self._run(args) def execute(self, name): path = self.get_path() From pypy.commits at gmail.com Thu Jan 7 10:19:54 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:19:54 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Allow platform-specific hacks to invoke the compiler Message-ID: <568e821a.82df1c0a.7469f.ffffa8c1@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2535:dd5771e43bd0 Date: 2016-01-07 16:19 +0100 http://bitbucket.org/cffi/cffi/changeset/dd5771e43bd0/ Log: Allow platform-specific hacks to invoke the compiler diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -1,3 +1,4 @@ +import py import sys, os import shutil, subprocess from testing.udir import udir @@ -35,10 +36,22 @@ env=env) self._compiled_modules.add(name) - def compile(self, name, modules, extra=[]): + def compile(self, name, modules, **flags): path = self.get_path() filename = '%s.c' % name shutil.copy(os.path.join(local_dir, filename), path) + if sys.platform.startswith('linux'): + self._compile_linux(name, modules, **flags) + elif sys.platform.startswith('win'): + self._compile_win(name, modules, **flags) + else: + py.test.skip("don't know how to invoke the C compiler on %r" % + (sys.platform,)) + + def _compile_linux(self, name, modules, + opt=False, threads=False, defines={}): + path = self.get_path() + filename = '%s.c' % name if 'CC' in os.environ: args = os.environ['CC'].split() else: @@ -47,6 +60,10 @@ args.extend(os.environ['CFLAGS'].split()) if 'LDFLAGS' in os.environ: args.extend(os.environ['LDFLAGS'].split()) + if threads: + args.append('-pthread') + if opt: + args.append('-O2') args.extend(['-g', filename, '-o', name, '-L.']) if '__pypy__' in sys.builtin_module_names: # xxx a bit hackish, maybe ffi.compile() should do a better job @@ -63,9 +80,14 @@ args.extend(['%s.so' % modname for modname in modules]) args.append('-lpython2.7') args.append('-Wl,-rpath=$ORIGIN/') - args.extend(extra) + for key, value in sorted(defines.items()): + args.append('-D%s=%s' % (key, value)) self._run(args) + def _compile_win(self, name, modules, + opt=False, threads=False, defines={}): + xxxx + def execute(self, name): path = self.get_path() env = os.environ.copy() diff --git a/testing/embedding/test_performance.py b/testing/embedding/test_performance.py --- a/testing/embedding/test_performance.py +++ b/testing/embedding/test_performance.py @@ -4,7 +4,7 @@ class TestPerformance(EmbeddingTests): def test_perf_single_threaded(self): self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], ['-O2']) + self.compile('perf-test', ['_perf_cffi'], opt=True) output = self.execute('perf-test') print '='*79 print output.rstrip() @@ -12,8 +12,8 @@ def test_perf_in_1_thread(self): self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], - ['-pthread', '-O2', '-DPTEST_USE_THREAD=1']) + self.compile('perf-test', ['_perf_cffi'], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '1'}) output = self.execute('perf-test') print '='*79 print output.rstrip() @@ -21,8 +21,8 @@ def test_perf_in_2_threads(self): self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], - ['-pthread', '-O2', '-DPTEST_USE_THREAD=2']) + self.compile('perf-test', ['_perf_cffi'], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '2'}) output = self.execute('perf-test') print '='*79 print output.rstrip() @@ -30,8 +30,8 @@ def test_perf_in_4_threads(self): self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], - ['-pthread', '-O2', '-DPTEST_USE_THREAD=4']) + self.compile('perf-test', ['_perf_cffi'], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '4'}) output = self.execute('perf-test') print '='*79 print output.rstrip() @@ -39,8 +39,8 @@ def test_perf_in_8_threads(self): self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], - ['-pthread', '-O2', '-DPTEST_USE_THREAD=8']) + self.compile('perf-test', ['_perf_cffi'], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '8'}) output = self.execute('perf-test') print '='*79 print output.rstrip() diff --git a/testing/embedding/test_thread.py b/testing/embedding/test_thread.py --- a/testing/embedding/test_thread.py +++ b/testing/embedding/test_thread.py @@ -4,7 +4,7 @@ class TestThread(EmbeddingTests): def test_first_calls_in_parallel(self): self.prepare_module('add1') - self.compile('thread1-test', ['_add1_cffi'], ['-pthread']) + self.compile('thread1-test', ['_add1_cffi'], threads=True) for i in range(50): output = self.execute('thread1-test') assert output == ("starting\n" @@ -20,7 +20,7 @@ def test_init_different_modules_in_different_threads(self): self.prepare_module('add1') self.prepare_module('add2') - self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], ['-pthread']) + self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], threads=True) output = self.execute('thread2-test') output = self._take_out(output, "preparing") output = self._take_out(output, ".") @@ -37,7 +37,7 @@ self.prepare_module('add1') self.prepare_module('add2') self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], - ['-pthread', '-DT2TEST_AGAIN_ADD1']) + threads=True, defines={'T2TEST_AGAIN_ADD1': '1'}) output = self.execute('thread2-test') output = self._take_out(output, "adding 40 and 2\n") assert output == ("starting\n" @@ -50,7 +50,7 @@ def test_load_in_parallel_more(self): self.prepare_module('add2') self.prepare_module('add3') - self.compile('thread3-test', ['_add2_cffi', '_add3_cffi'], ['-pthread']) + self.compile('thread3-test', ['_add2_cffi', '_add3_cffi'], threads=True) for i in range(150): output = self.execute('thread3-test') for j in range(10): diff --git a/testing/embedding/test_tlocal.py b/testing/embedding/test_tlocal.py --- a/testing/embedding/test_tlocal.py +++ b/testing/embedding/test_tlocal.py @@ -4,7 +4,7 @@ class TestThreadLocal(EmbeddingTests): def test_thread_local(self): self.prepare_module('tlocal') - self.compile('tlocal-test', ['_tlocal_cffi'], ['-pthread']) + self.compile('tlocal-test', ['_tlocal_cffi'], threads=True) for i in range(10): output = self.execute('tlocal-test') assert output == "done\n" From pypy.commits at gmail.com Thu Jan 7 10:37:23 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:37:23 -0800 (PST) Subject: [pypy-commit] cffi windows-tls: ready to merge Message-ID: <568e8633.2a06c20a.83684.fffff537@mx.google.com> Author: Armin Rigo Branch: windows-tls Changeset: r2536:d593251527eb Date: 2016-01-07 16:36 +0100 http://bitbucket.org/cffi/cffi/changeset/d593251527eb/ Log: ready to merge From pypy.commits at gmail.com Thu Jan 7 10:37:24 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:37:24 -0800 (PST) Subject: [pypy-commit] cffi default: hg merge windows-tls Message-ID: <568e8634.8e371c0a.29b03.ffffb0f7@mx.google.com> Author: Armin Rigo Branch: Changeset: r2537:c1adfab5014d Date: 2016-01-07 16:37 +0100 http://bitbucket.org/cffi/cffi/changeset/c1adfab5014d/ Log: hg merge windows-tls Port to Windows the code to avoid creating and destroying PyThreadStates all the time diff --git a/c/misc_thread_common.h b/c/misc_thread_common.h new file mode 100644 --- /dev/null +++ b/c/misc_thread_common.h @@ -0,0 +1,136 @@ +#ifndef WITH_THREAD +# error "xxx no-thread configuration not tested, please report if you need that" +#endif + + +struct cffi_tls_s { + /* The locally-made thread state. This is only non-null in case + we build the thread state here. It remains null if this thread + had already a thread state provided by CPython. */ + PyThreadState *local_thread_state; + +#ifndef USE__THREAD + /* The saved errno. If the C compiler supports '__thread', then + we use that instead. */ + int saved_errno; +#endif + +#ifdef MS_WIN32 + /* The saved lasterror, on Windows. */ + int saved_lasterror; +#endif +}; + +static struct cffi_tls_s *get_cffi_tls(void); /* in misc_thread_posix.h + or misc_win32.h */ + +static void cffi_thread_shutdown(void *p) +{ + struct cffi_tls_s *tls = (struct cffi_tls_s *)p; + + if (tls->local_thread_state != NULL) { + /* We need to re-acquire the GIL temporarily to free the + thread state. I hope it is not a problem to do it in + a thread-local destructor. + */ + PyEval_RestoreThread(tls->local_thread_state); + PyThreadState_DeleteCurrent(); + } + free(tls); +} + +/* USE__THREAD is defined by setup.py if it finds that it is + syntactically valid to use "__thread" with this C compiler. */ +#ifdef USE__THREAD + +static __thread int cffi_saved_errno = 0; +static void save_errno_only(void) { cffi_saved_errno = errno; } +static void restore_errno_only(void) { errno = cffi_saved_errno; } + +#else + +static void save_errno_only(void) +{ + int saved = errno; + struct cffi_tls_s *tls = get_cffi_tls(); + if (tls != NULL) + tls->saved_errno = saved; +} + +static void restore_errno_only(void) +{ + struct cffi_tls_s *tls = get_cffi_tls(); + if (tls != NULL) + errno = tls->saved_errno; +} + +#endif + + +/* Seems that CPython 3.5.1 made our job harder. Did not find out how + to do that without these hacks. We can't use PyThreadState_GET(), + because that calls PyThreadState_Get() which fails an assert if the + result is NULL. */ +#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) + /* this was abruptly un-defined in 3.5.1 */ +void *volatile _PyThreadState_Current; + /* XXX simple volatile access is assumed atomic */ +# define _Py_atomic_load_relaxed(pp) (*(pp)) +#endif + +static PyThreadState *get_current_ts(void) +{ +#if PY_MAJOR_VERSION >= 3 + return (PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current); +#else + return _PyThreadState_Current; +#endif +} + +static PyGILState_STATE gil_ensure(void) +{ + /* Called at the start of a callback. Replacement for + PyGILState_Ensure(). + */ + PyGILState_STATE result; + struct cffi_tls_s *tls; + PyThreadState *ts = PyGILState_GetThisThreadState(); + + if (ts != NULL) { + ts->gilstate_counter++; + if (ts != get_current_ts()) { + /* common case: 'ts' is our non-current thread state and + we have to make it current and acquire the GIL */ + PyEval_RestoreThread(ts); + return PyGILState_UNLOCKED; + } + else { + return PyGILState_LOCKED; + } + } + else { + /* no thread state here so far. */ + result = PyGILState_Ensure(); + assert(result == PyGILState_UNLOCKED); + + ts = PyGILState_GetThisThreadState(); + assert(ts != NULL); + assert(ts == get_current_ts()); + assert(ts->gilstate_counter >= 1); + + /* Save the now-current thread state inside our 'local_thread_state' + field, to be removed at thread shutdown */ + tls = get_cffi_tls(); + if (tls != NULL) { + tls->local_thread_state = ts; + ts->gilstate_counter++; + } + + return result; + } +} + +static void gil_release(PyGILState_STATE oldstate) +{ + PyGILState_Release(oldstate); +} diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h --- a/c/misc_thread_posix.h +++ b/c/misc_thread_posix.h @@ -13,41 +13,15 @@ shut down, using a destructor on the tls key. */ -#ifdef WITH_THREAD #include +#include "misc_thread_common.h" static pthread_key_t cffi_tls_key; -struct cffi_tls_s { - /* The locally-made thread state. This is only non-null in case - we build the thread state here. It remains null if this thread - had already a thread state provided by CPython. */ - PyThreadState *local_thread_state; - - /* The saved errno. If the C compiler supports '__thread', then - we use that instead; this value is not used at all in this case. */ - int saved_errno; -}; - -static void _tls_destructor(void *p) -{ - struct cffi_tls_s *tls = (struct cffi_tls_s *)p; - - if (tls->local_thread_state != NULL) { - /* We need to re-acquire the GIL temporarily to free the - thread state. I hope it is not a problem to do it in - a thread-local destructor. - */ - PyEval_RestoreThread(tls->local_thread_state); - PyThreadState_DeleteCurrent(); - } - free(tls); -} - static void init_cffi_tls(void) { - if (pthread_key_create(&cffi_tls_key, _tls_destructor) != 0) + if (pthread_key_create(&cffi_tls_key, &cffi_thread_shutdown) != 0) PyErr_SetString(PyExc_OSError, "pthread_key_create() failed"); } @@ -71,116 +45,5 @@ return (struct cffi_tls_s *)p; } - -/* USE__THREAD is defined by setup.py if it finds that it is - syntactically valid to use "__thread" with this C compiler. */ -#ifdef USE__THREAD - -static __thread int cffi_saved_errno = 0; -static void save_errno(void) { cffi_saved_errno = errno; } -static void restore_errno(void) { errno = cffi_saved_errno; } - -#else - -static void save_errno(void) -{ - int saved = errno; - struct cffi_tls_s *tls = get_cffi_tls(); - if (tls != NULL) - tls->saved_errno = saved; -} - -static void restore_errno(void) -{ - struct cffi_tls_s *tls = get_cffi_tls(); - if (tls != NULL) - errno = tls->saved_errno; -} - -#endif - - -/* Seems that CPython 3.5.1 made our job harder. Did not find out how - to do that without these hacks. We can't use PyThreadState_GET(), - because that calls PyThreadState_Get() which fails an assert if the - result is NULL. */ -#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) - /* this was abruptly un-defined in 3.5.1 */ -void *volatile _PyThreadState_Current; - /* XXX simple volatile access is assumed atomic */ -# define _Py_atomic_load_relaxed(pp) (*(pp)) -#endif - - -static PyThreadState *get_current_ts(void) -{ -#if PY_MAJOR_VERSION >= 3 - return (PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current); -#else - return _PyThreadState_Current; -#endif -} - -static PyGILState_STATE gil_ensure(void) -{ - /* Called at the start of a callback. Replacement for - PyGILState_Ensure(). - */ - PyGILState_STATE result; - struct cffi_tls_s *tls; - PyThreadState *ts = PyGILState_GetThisThreadState(); - - if (ts != NULL) { - ts->gilstate_counter++; - if (ts != get_current_ts()) { - /* common case: 'ts' is our non-current thread state and - we have to make it current and acquire the GIL */ - PyEval_RestoreThread(ts); - return PyGILState_UNLOCKED; - } - else { - return PyGILState_LOCKED; - } - } - else { - /* no thread state here so far. */ - result = PyGILState_Ensure(); - assert(result == PyGILState_UNLOCKED); - - ts = PyGILState_GetThisThreadState(); - assert(ts != NULL); - assert(ts == get_current_ts()); - assert(ts->gilstate_counter >= 1); - - /* Save the now-current thread state inside our 'local_thread_state' - field, to be removed at thread shutdown */ - tls = get_cffi_tls(); - if (tls != NULL) { - tls->local_thread_state = ts; - ts->gilstate_counter++; - } - - return result; - } -} - -static void gil_release(PyGILState_STATE oldstate) -{ - PyGILState_Release(oldstate); -} - - -#else /* !WITH_THREAD */ - -static int cffi_saved_errno = 0; -static void save_errno(void) { cffi_saved_errno = errno; } -static void restore_errno(void) { errno = cffi_saved_errno; } - -static PyGILState_STATE gil_ensure(void) { return -1; } -static void gil_release(PyGILState_STATE oldstate) { } - -#endif /* !WITH_THREAD */ - - -#define save_errno_only save_errno -#define restore_errno_only restore_errno +#define save_errno save_errno_only +#define restore_errno restore_errno_only diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -1,15 +1,37 @@ #include /* for alloca() */ + /************************************************************/ /* errno and GetLastError support */ -struct cffi_errno_s { - int saved_errno; - int saved_lasterror; -}; +#include "misc_thread_common.h" static DWORD cffi_tls_index = TLS_OUT_OF_INDEXES; +BOOL WINAPI DllMain(HINSTANCE hinstDLL, + DWORD reason_for_call, + LPVOID reserved) +{ + LPVOID p; + + switch (reason_for_call) { + + case DLL_THREAD_DETACH: + if (cffi_tls_index != TLS_OUT_OF_INDEXES) { + p = TlsGetValue(cffi_tls_index); + if (p != NULL) { + TlsSetValue(cffi_tls_index, NULL); + cffi_thread_shutdown(p); + } + } + break; + + default: + break; + } + return TRUE; +} + static void init_cffi_tls(void) { if (cffi_tls_index == TLS_OUT_OF_INDEXES) { @@ -19,28 +41,29 @@ } } -static struct cffi_errno_s *_geterrno_object(void) +static struct cffi_tls_s *get_cffi_tls(void) { LPVOID p = TlsGetValue(cffi_tls_index); if (p == NULL) { - /* XXX this malloc() leaks */ - p = malloc(sizeof(struct cffi_errno_s)); + p = malloc(sizeof(struct cffi_tls_s)); if (p == NULL) return NULL; - memset(p, 0, sizeof(struct cffi_errno_s)); + memset(p, 0, sizeof(struct cffi_tls_s)); TlsSetValue(cffi_tls_index, p); } - return (struct cffi_errno_s *)p; + return (struct cffi_tls_s *)p; } +#ifdef USE__THREAD +# error "unexpected USE__THREAD on Windows" +#endif + static void save_errno(void) { int current_err = errno; int current_lasterr = GetLastError(); - struct cffi_errno_s *p; - - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p != NULL) { p->saved_errno = current_err; p->saved_lasterror = current_lasterr; @@ -48,23 +71,9 @@ /* else: cannot report the error */ } -static void save_errno_only(void) -{ - int current_err = errno; - struct cffi_errno_s *p; - - p = _geterrno_object(); - if (p != NULL) { - p->saved_errno = current_err; - } - /* else: cannot report the error */ -} - static void restore_errno(void) { - struct cffi_errno_s *p; - - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p != NULL) { SetLastError(p->saved_lasterror); errno = p->saved_errno; @@ -72,16 +81,8 @@ /* else: cannot report the error */ } -static void restore_errno_only(void) -{ - struct cffi_errno_s *p; +/************************************************************/ - p = _geterrno_object(); - if (p != NULL) { - errno = p->saved_errno; - } - /* else: cannot report the error */ -} #if PY_MAJOR_VERSION >= 3 static PyObject *b_getwinerror(PyObject *self, PyObject *args, PyObject *kwds) @@ -96,8 +97,7 @@ return NULL; if (err == -1) { - struct cffi_errno_s *p; - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p == NULL) return PyErr_NoMemory(); err = p->saved_lasterror; @@ -138,7 +138,7 @@ int len; char *s; char *s_buf = NULL; /* Free via LocalFree */ - char s_small_buf[28]; /* Room for "Windows Error 0xFFFFFFFF" */ + char s_small_buf[40]; /* Room for "Windows Error 0xFFFFFFFFFFFFFFFF" */ PyObject *v; static char *keywords[] = {"code", NULL}; @@ -146,8 +146,7 @@ return NULL; if (err == -1) { - struct cffi_errno_s *p; - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p == NULL) return PyErr_NoMemory(); err = p->saved_lasterror; @@ -183,16 +182,6 @@ #endif -#ifdef WITH_THREAD -/* XXX should port the code from misc_thread_posix.h */ -static PyGILState_STATE gil_ensure(void) { return PyGILState_Ensure(); } -static void gil_release(PyGILState_STATE oldst) { PyGILState_Release(oldst); } -#else -static PyGILState_STATE gil_ensure(void) { return -1; } -static void gil_release(PyGILState_STATE oldstate) { } -#endif - - /************************************************************/ /* Emulate dlopen()&co. from the Windows API */ From pypy.commits at gmail.com Thu Jan 7 10:55:40 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:55:40 -0800 (PST) Subject: [pypy-commit] cffi demo-cleanup: just killing the verify() line is not enough Message-ID: <568e8a7c.520e1c0a.322f7.ffffb8e7@mx.google.com> Author: Armin Rigo Branch: demo-cleanup Changeset: r2538:33bea79c3df6 Date: 2016-01-07 16:47 +0100 http://bitbucket.org/cffi/cffi/changeset/33bea79c3df6/ Log: just killing the verify() line is not enough diff --git a/demo/btrfs-snap.py b/demo/btrfs-snap.py --- a/demo/btrfs-snap.py +++ b/demo/btrfs-snap.py @@ -22,6 +22,13 @@ }; """) +ffi.set_source("_btrfs_cffi", "#include ") +ffi.compile() + +# ____________________________________________________________ + + +from _btrfs_cffi import ffi, lib parser = argparse.ArgumentParser(usage=__doc__.strip()) parser.add_argument('source', help='source subvolume') @@ -38,7 +45,7 @@ args.fd = source args_buffer = ffi.buffer(args) try: - fcntl.ioctl(target, v.BTRFS_IOC_SNAP_CREATE_V2, args_buffer) + fcntl.ioctl(target, lib.BTRFS_IOC_SNAP_CREATE_V2, args_buffer) except IOError as e: print e sys.exit(1) From pypy.commits at gmail.com Thu Jan 7 10:55:42 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:55:42 -0800 (PST) Subject: [pypy-commit] cffi demo-cleanup: fix the demo Message-ID: <568e8a7e.913bc20a.d29ab.6bd4@mx.google.com> Author: Armin Rigo Branch: demo-cleanup Changeset: r2539:496f6485013e Date: 2016-01-07 16:47 +0100 http://bitbucket.org/cffi/cffi/changeset/496f6485013e/ Log: fix the demo diff --git a/demo/fastcsv.py b/demo/fastcsv.py --- a/demo/fastcsv.py +++ b/demo/fastcsv.py @@ -4,9 +4,8 @@ # IN-PROGRESS. See the demo at the end of the file -dialect2ffi = {} - -def _make_ffi_from_dialect(dialect): +def _make_ffi_from_dialect(dialect_name): + dialect = csv.get_dialect(dialect_name) ffi = cffi.FFI() @@ -26,7 +25,7 @@ else: d['is_escape_char'] = '&& 0' - ffi.set_source('_fastcsv', r''' + ffi.set_source('_fastcsv_' + dialect_name, r''' typedef enum { START_RECORD, START_FIELD, ESCAPED_CHAR, IN_FIELD, @@ -237,15 +236,16 @@ } ''' % d) - return ffi, lib + ffi.compile() -def fastcsv_reader(f, dialect): - dialect = csv.get_dialect(dialect) +def fastcsv_reader(f, dialect_name): try: - ffi, lib = dialect2ffi[dialect] - except KeyError: - ffi, lib = dialect2ffi[dialect] = _make_ffi_from_dialect(dialect) + module = __import__('_fastcsv_' + dialect_name) + except ImportError: + _make_ffi_from_dialect(dialect_name) + module = __import__('_fastcsv_' + dialect_name) + ffi, lib = module.ffi, module.lib # linelen = -1 for line in f: From pypy.commits at gmail.com Thu Jan 7 10:55:44 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:55:44 -0800 (PST) Subject: [pypy-commit] cffi demo-cleanup: tweaks Message-ID: <568e8a80.034cc20a.16a73.43be@mx.google.com> Author: Armin Rigo Branch: demo-cleanup Changeset: r2540:9ef5c0ac4e0d Date: 2016-01-07 16:50 +0100 http://bitbucket.org/cffi/cffi/changeset/9ef5c0ac4e0d/ Log: tweaks diff --git a/demo/gmp.py b/demo/gmp.py --- a/demo/gmp.py +++ b/demo/gmp.py @@ -5,18 +5,14 @@ # http://bazaar.launchpad.net/~tolot-solar-empire/+junk/gmpy_cffi/files # -# If the build script was run immediately before this script, the cffi module -# ends up in the current directory. Make sure we can import it. -sys.path.append('.') - try: - from _gmp import ffi, lib + from _gmp_cffi import ffi, lib except ImportError: print 'run gmp_build first, then make sure the shared object is on sys.path' - sys.exit(-1) + sys.exit(1) # ffi "knows" about the declared variables and functions from the -# cdef parts of the module xclient_build created, +# cdef parts of the module created from gmp_build # lib "knows" how to call the functions from the set_source parts # of the module. @@ -27,7 +23,7 @@ if len(sys.argv) < 3: print 'call as %s bigint1, bigint2' % sys.argv[0] - sys.exit(-1) + sys.exit(2) lib.mpz_init_set_str(a, sys.argv[1], 10) # Assume decimal integers lib.mpz_init_set_str(b, sys.argv[2], 10) # Assume decimal integers diff --git a/demo/gmp_build.py b/demo/gmp_build.py --- a/demo/gmp_build.py +++ b/demo/gmp_build.py @@ -19,7 +19,7 @@ """) -ffi.set_source('_gmp', "#include ", +ffi.set_source('_gmp_cffi', "#include ", libraries=['gmp', 'm']) if __name__ == '__main__': From pypy.commits at gmail.com Thu Jan 7 10:55:47 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:55:47 -0800 (PST) Subject: [pypy-commit] cffi demo-cleanup: tweaks Message-ID: <568e8a83.85e41c0a.310b3.ffffbeaa@mx.google.com> Author: Armin Rigo Branch: demo-cleanup Changeset: r2542:d1669b7461e4 Date: 2016-01-07 16:53 +0100 http://bitbucket.org/cffi/cffi/changeset/d1669b7461e4/ Log: tweaks diff --git a/demo/readdir2.py b/demo/readdir2.py --- a/demo/readdir2.py +++ b/demo/readdir2.py @@ -5,15 +5,9 @@ if not sys.platform.startswith('linux'): raise Exception("Linux-only demo") -# If the build script was run immediately before this script, the cffi module -# ends up in the current directory. Make sure we can import it. -sys.path.append('.') +# run readdir2_build first, then make sure the shared object is on sys.path +from _readdir2_cffi import ffi, lib -try: - from _readdir2 import ffi, lib -except ImportError: - print 'run readdir2_build first, then make sure the shared object is on sys.path' - sys.exit(-1) def walk(basefd, path): print '{', path diff --git a/demo/readdir2_build.py b/demo/readdir2_build.py --- a/demo/readdir2_build.py +++ b/demo/readdir2_build.py @@ -20,7 +20,7 @@ static const int DT_DIR; """) -ffi.set_source("_readdir2", """ +ffi.set_source("_readdir2_cffi", """ #ifndef _ATFILE_SOURCE # define _ATFILE_SOURCE #endif From pypy.commits at gmail.com Thu Jan 7 10:55:49 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:55:49 -0800 (PST) Subject: [pypy-commit] cffi demo-cleanup: tweaks Message-ID: <568e8a85.aa5dc20a.74c0.0ab3@mx.google.com> Author: Armin Rigo Branch: demo-cleanup Changeset: r2543:ee6bdacc57db Date: 2016-01-07 16:54 +0100 http://bitbucket.org/cffi/cffi/changeset/ee6bdacc57db/ Log: tweaks diff --git a/demo/winclipboard.py b/demo/winclipboard.py --- a/demo/winclipboard.py +++ b/demo/winclipboard.py @@ -5,18 +5,14 @@ if not sys.platform == 'win32': raise Exception("Windows-only demo") -# If the build script was run immediately before this script, the cffi module -# ends up in the current directory. Make sure we can import it. -sys.path.append('.') - try: - from _winclipboard import ffi, lib + from _winclipboard_cffi import ffi, lib except ImportError: print 'run winclipboard_build first, then make sure the shared object is on sys.path' - sys.exit(-1) + sys.exit(1) # ffi "knows" about the declared variables and functions from the -# cdef parts of the module xclient_build created, +# cdef parts of the module _winclipboard_cffi created, # lib "knows" how to call the functions from the set_source parts # of the module. diff --git a/demo/winclipboard_build.py b/demo/winclipboard_build.py --- a/demo/winclipboard_build.py +++ b/demo/winclipboard_build.py @@ -28,7 +28,7 @@ void * memcpy(void * s1, void * s2, int n); ''') -ffi.set_source('_winclipboard', ''' +ffi.set_source('_winclipboard_cffi', ''' #include ''', libraries=["user32"]) From pypy.commits at gmail.com Thu Jan 7 10:55:51 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:55:51 -0800 (PST) Subject: [pypy-commit] cffi demo-cleanup: tweaks Message-ID: <568e8a87.2269c20a.7eae7.39b3@mx.google.com> Author: Armin Rigo Branch: demo-cleanup Changeset: r2544:c5133d5b81fe Date: 2016-01-07 16:55 +0100 http://bitbucket.org/cffi/cffi/changeset/c5133d5b81fe/ Log: tweaks diff --git a/demo/xclient.py b/demo/xclient.py --- a/demo/xclient.py +++ b/demo/xclient.py @@ -1,14 +1,8 @@ import sys, os -# If the build script was run immediately before this script, the cffi module -# ends up in the current directory. Make sure we can import it. -sys.path.append('.') +# run xclient_build first, then make sure the shared object is on sys.path +from _xclient_cffi import ffi, lib -try: - from _xclient import ffi, lib -except ImportError: - print 'run xclient_build first, then make sure the shared object is on sys.path' - sys.exit(-1) # ffi "knows" about the declared variables and functions from the # cdef parts of the module xclient_build created, diff --git a/demo/xclient_build.py b/demo/xclient_build.py --- a/demo/xclient_build.py +++ b/demo/xclient_build.py @@ -17,10 +17,9 @@ int XNextEvent(Display *display, XEvent *event_return); """) -ffi.set_source('_xclient', """ +ffi.set_source('_xclient_cffi', """ #include """, libraries=['X11']) if __name__ == '__main__': ffi.compile() - From pypy.commits at gmail.com Thu Jan 7 10:55:45 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:55:45 -0800 (PST) Subject: [pypy-commit] cffi demo-cleanup: tweaks Message-ID: <568e8a81.01941c0a.31072.ffffbba7@mx.google.com> Author: Armin Rigo Branch: demo-cleanup Changeset: r2541:fae45da435e2 Date: 2016-01-07 16:52 +0100 http://bitbucket.org/cffi/cffi/changeset/fae45da435e2/ Log: tweaks diff --git a/demo/pwuid.py b/demo/pwuid.py --- a/demo/pwuid.py +++ b/demo/pwuid.py @@ -1,18 +1,7 @@ import sys, os -# If the build script was run immediately before this script, the cffi module -# ends up in the current directory. Make sure we can import it. -sys.path.append('.') +# run pwuid_build first, then make sure the shared object is on sys.path +from _pwuid_cffi import ffi, lib -try: - from _pwuid import ffi, lib -except ImportError: - print 'run pwuid_build first, then make sure the shared object is on sys.path' - sys.exit(-1) - -# ffi "knows" about the declared variables and functions from the -# cdef parts of the module xclient_build created, -# lib "knows" how to call the functions from the set_source parts -# of the module. print ffi.string(lib.getpwuid(0).pw_name) diff --git a/demo/pwuid_build.py b/demo/pwuid_build.py --- a/demo/pwuid_build.py +++ b/demo/pwuid_build.py @@ -8,7 +8,7 @@ struct passwd *getpwuid(int uid); """) -ffi.set_source('_pwuid', """ // passed to the real C compiler +ffi.set_source('_pwuid_cffi', """ // passed to the real C compiler #include #include """) From pypy.commits at gmail.com Thu Jan 7 10:56:01 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:56:01 -0800 (PST) Subject: [pypy-commit] cffi demo-cleanup: ready to merge Message-ID: <568e8a91.8a5a1c0a.fb76a.ffffb683@mx.google.com> Author: Armin Rigo Branch: demo-cleanup Changeset: r2545:d672723c75b4 Date: 2016-01-07 16:55 +0100 http://bitbucket.org/cffi/cffi/changeset/d672723c75b4/ Log: ready to merge From pypy.commits at gmail.com Thu Jan 7 10:56:03 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 07:56:03 -0800 (PST) Subject: [pypy-commit] cffi default: hg merge demo-cleanup Message-ID: <568e8a93.08e11c0a.3c6f8.ffffb5ae@mx.google.com> Author: Armin Rigo Branch: Changeset: r2546:89e4262a2073 Date: 2016-01-07 16:55 +0100 http://bitbucket.org/cffi/cffi/changeset/89e4262a2073/ Log: hg merge demo-cleanup Thanks Matti :-) diff --git a/demo/btrfs-snap.py b/demo/btrfs-snap.py --- a/demo/btrfs-snap.py +++ b/demo/btrfs-snap.py @@ -22,10 +22,14 @@ }; """) -v = ffi.verify("#include ") +ffi.set_source("_btrfs_cffi", "#include ") +ffi.compile() +# ____________________________________________________________ +from _btrfs_cffi import ffi, lib + parser = argparse.ArgumentParser(usage=__doc__.strip()) parser.add_argument('source', help='source subvolume') parser.add_argument('target', help='target directory') @@ -41,7 +45,7 @@ args.fd = source args_buffer = ffi.buffer(args) try: - fcntl.ioctl(target, v.BTRFS_IOC_SNAP_CREATE_V2, args_buffer) + fcntl.ioctl(target, lib.BTRFS_IOC_SNAP_CREATE_V2, args_buffer) except IOError as e: print e sys.exit(1) diff --git a/demo/fastcsv.py b/demo/fastcsv.py --- a/demo/fastcsv.py +++ b/demo/fastcsv.py @@ -4,9 +4,8 @@ # IN-PROGRESS. See the demo at the end of the file -dialect2ffi = {} - -def _make_ffi_from_dialect(dialect): +def _make_ffi_from_dialect(dialect_name): + dialect = csv.get_dialect(dialect_name) ffi = cffi.FFI() @@ -26,7 +25,7 @@ else: d['is_escape_char'] = '&& 0' - lib = ffi.verify(r''' + ffi.set_source('_fastcsv_' + dialect_name, r''' typedef enum { START_RECORD, START_FIELD, ESCAPED_CHAR, IN_FIELD, @@ -237,15 +236,16 @@ } ''' % d) - return ffi, lib + ffi.compile() -def fastcsv_reader(f, dialect): - dialect = csv.get_dialect(dialect) +def fastcsv_reader(f, dialect_name): try: - ffi, lib = dialect2ffi[dialect] - except KeyError: - ffi, lib = dialect2ffi[dialect] = _make_ffi_from_dialect(dialect) + module = __import__('_fastcsv_' + dialect_name) + except ImportError: + _make_ffi_from_dialect(dialect_name) + module = __import__('_fastcsv_' + dialect_name) + ffi, lib = module.ffi, module.lib # linelen = -1 for line in f: diff --git a/demo/gmp.py b/demo/gmp.py --- a/demo/gmp.py +++ b/demo/gmp.py @@ -1,33 +1,30 @@ import sys -import cffi - # # This is only a demo based on the GMP library. -# There is a rather more complete version available at: +# There is a rather more complete (but perhaps outdated) version available at: # http://bazaar.launchpad.net/~tolot-solar-empire/+junk/gmpy_cffi/files # -ffi = cffi.FFI() +try: + from _gmp_cffi import ffi, lib +except ImportError: + print 'run gmp_build first, then make sure the shared object is on sys.path' + sys.exit(1) -ffi.cdef(""" - - typedef struct { ...; } MP_INT; - typedef MP_INT mpz_t[1]; - - int mpz_init_set_str (MP_INT *dest_integer, char *src_cstring, int base); - void mpz_add (MP_INT *sum, MP_INT *addend1, MP_INT *addend2); - char * mpz_get_str (char *string, int base, MP_INT *integer); - -""") - -lib = ffi.verify("#include ", - libraries=['gmp', 'm']) +# ffi "knows" about the declared variables and functions from the +# cdef parts of the module created from gmp_build +# lib "knows" how to call the functions from the set_source parts +# of the module. # ____________________________________________________________ a = ffi.new("mpz_t") b = ffi.new("mpz_t") +if len(sys.argv) < 3: + print 'call as %s bigint1, bigint2' % sys.argv[0] + sys.exit(2) + lib.mpz_init_set_str(a, sys.argv[1], 10) # Assume decimal integers lib.mpz_init_set_str(b, sys.argv[2], 10) # Assume decimal integers lib.mpz_add(a, a, b) # a=a+b diff --git a/demo/gmp_build.py b/demo/gmp_build.py new file mode 100644 --- /dev/null +++ b/demo/gmp_build.py @@ -0,0 +1,27 @@ +import cffi + +# +# This is only a demo based on the GMP library. +# There is a rather more complete (but perhaps outdated) version available at: +# http://bazaar.launchpad.net/~tolot-solar-empire/+junk/gmpy_cffi/files +# + +ffi = cffi.FFI() + +ffi.cdef(""" + + typedef struct { ...; } MP_INT; + typedef MP_INT mpz_t[1]; + + int mpz_init_set_str (MP_INT *dest_integer, char *src_cstring, int base); + void mpz_add (MP_INT *sum, MP_INT *addend1, MP_INT *addend2); + char * mpz_get_str (char *string, int base, MP_INT *integer); + +""") + +ffi.set_source('_gmp_cffi', "#include ", + libraries=['gmp', 'm']) + +if __name__ == '__main__': + ffi.compile() + diff --git a/demo/pwuid.py b/demo/pwuid.py --- a/demo/pwuid.py +++ b/demo/pwuid.py @@ -1,14 +1,7 @@ -from cffi import FFI -ffi = FFI() -ffi.cdef(""" // some declarations from the man page - struct passwd { - char *pw_name; - ...; - }; - struct passwd *getpwuid(int uid); -""") -C = ffi.verify(""" // passed to the real C compiler -#include -#include -""") -print ffi.string(C.getpwuid(0).pw_name) +import sys, os + +# run pwuid_build first, then make sure the shared object is on sys.path +from _pwuid_cffi import ffi, lib + + +print ffi.string(lib.getpwuid(0).pw_name) diff --git a/demo/pwuid_build.py b/demo/pwuid_build.py new file mode 100644 --- /dev/null +++ b/demo/pwuid_build.py @@ -0,0 +1,18 @@ +from cffi import FFI +ffi = FFI() +ffi.cdef(""" // some declarations from the man page + struct passwd { + char *pw_name; + ...; + }; + struct passwd *getpwuid(int uid); +""") + +ffi.set_source('_pwuid_cffi', """ // passed to the real C compiler +#include +#include +""") + + +if __name__ == '__main__': + ffi.compile() diff --git a/demo/readdir2.py b/demo/readdir2.py --- a/demo/readdir2.py +++ b/demo/readdir2.py @@ -1,11 +1,13 @@ -# A Linux-only demo, using verify() instead of hard-coding the exact layouts +# A Linux-only demo, using set_source() instead of hard-coding the exact layouts # import sys -from _readdir2 import ffi, lib if not sys.platform.startswith('linux'): raise Exception("Linux-only demo") +# run readdir2_build first, then make sure the shared object is on sys.path +from _readdir2_cffi import ffi, lib + def walk(basefd, path): print '{', path diff --git a/demo/readdir2_build.py b/demo/readdir2_build.py --- a/demo/readdir2_build.py +++ b/demo/readdir2_build.py @@ -20,7 +20,7 @@ static const int DT_DIR; """) -ffi.set_source("_readdir2", """ +ffi.set_source("_readdir2_cffi", """ #ifndef _ATFILE_SOURCE # define _ATFILE_SOURCE #endif diff --git a/demo/winclipboard.py b/demo/winclipboard.py --- a/demo/winclipboard.py +++ b/demo/winclipboard.py @@ -1,60 +1,40 @@ __author__ = "Israel Fruchter " -from cffi import FFI +import sys, os -ffi = FFI() -ffi.cdef(''' - typedef void * HANDLE; - typedef HANDLE HWND; - typedef int BOOL; - typedef unsigned int UINT; - typedef int SIZE_T; - typedef char * LPTSTR; - typedef HANDLE HGLOBAL; - typedef HANDLE LPVOID; +if not sys.platform == 'win32': + raise Exception("Windows-only demo") - HWND GetConsoleWindow(void); +try: + from _winclipboard_cffi import ffi, lib +except ImportError: + print 'run winclipboard_build first, then make sure the shared object is on sys.path' + sys.exit(1) - LPVOID GlobalLock( HGLOBAL hMem ); - BOOL GlobalUnlock( HGLOBAL hMem ); - HGLOBAL GlobalAlloc(UINT uFlags, SIZE_T dwBytes); - - BOOL OpenClipboard(HWND hWndNewOwner); - BOOL CloseClipboard(void); - BOOL EmptyClipboard(void); - HANDLE SetClipboardData(UINT uFormat, HANDLE hMem); - - #define CF_TEXT ... - #define GMEM_MOVEABLE ... - - void * memcpy(void * s1, void * s2, int n); - ''') - -lib = ffi.verify(''' - #include -''', libraries=["user32"]) - -globals().update(lib.__dict__) +# ffi "knows" about the declared variables and functions from the +# cdef parts of the module _winclipboard_cffi created, +# lib "knows" how to call the functions from the set_source parts +# of the module. def CopyToClipboard(string): ''' use win32 api to copy `string` to the clipboard ''' - hWnd = GetConsoleWindow() + hWnd = lib.GetConsoleWindow() - if OpenClipboard(hWnd): + if lib.OpenClipboard(hWnd): cstring = ffi.new("char[]", string) size = ffi.sizeof(cstring) # make it a moveable memory for other processes - hGlobal = GlobalAlloc(GMEM_MOVEABLE, size) - buffer = GlobalLock(hGlobal) - memcpy(buffer, cstring, size) - GlobalUnlock(hGlobal) + hGlobal = lib.GlobalAlloc(lib.GMEM_MOVEABLE, size) + buffer = lib.GlobalLock(hGlobal) + lib.memcpy(buffer, cstring, size) + lib.GlobalUnlock(hGlobal) - res = EmptyClipboard() - res = SetClipboardData(CF_TEXT, buffer) + res = lib.EmptyClipboard() + res = lib.SetClipboardData(lib.CF_TEXT, buffer) - CloseClipboard() + lib.CloseClipboard() CopyToClipboard("hello world from cffi") diff --git a/demo/winclipboard_build.py b/demo/winclipboard_build.py new file mode 100644 --- /dev/null +++ b/demo/winclipboard_build.py @@ -0,0 +1,36 @@ +from cffi import FFI + +ffi = FFI() +ffi.cdef(''' + typedef void * HANDLE; + typedef HANDLE HWND; + typedef int BOOL; + typedef unsigned int UINT; + typedef int SIZE_T; + typedef char * LPTSTR; + typedef HANDLE HGLOBAL; + typedef HANDLE LPVOID; + + HWND GetConsoleWindow(void); + + LPVOID GlobalLock( HGLOBAL hMem ); + BOOL GlobalUnlock( HGLOBAL hMem ); + HGLOBAL GlobalAlloc(UINT uFlags, SIZE_T dwBytes); + + BOOL OpenClipboard(HWND hWndNewOwner); + BOOL CloseClipboard(void); + BOOL EmptyClipboard(void); + HANDLE SetClipboardData(UINT uFormat, HANDLE hMem); + + #define CF_TEXT ... + #define GMEM_MOVEABLE ... + + void * memcpy(void * s1, void * s2, int n); + ''') + +ffi.set_source('_winclipboard_cffi', ''' + #include +''', libraries=["user32"]) + +if __name__ == '__main__': + ffi.compile() diff --git a/demo/xclient.py b/demo/xclient.py --- a/demo/xclient.py +++ b/demo/xclient.py @@ -1,40 +1,27 @@ -from cffi import FFI +import sys, os -ffi = FFI() -ffi.cdef(""" +# run xclient_build first, then make sure the shared object is on sys.path +from _xclient_cffi import ffi, lib -typedef ... Display; -typedef struct { ...; } Window; -typedef struct { int type; ...; } XEvent; +# ffi "knows" about the declared variables and functions from the +# cdef parts of the module xclient_build created, +# lib "knows" how to call the functions from the set_source parts +# of the module. -Display *XOpenDisplay(char *display_name); -Window DefaultRootWindow(Display *display); -int XMapRaised(Display *display, Window w); -Window XCreateSimpleWindow(Display *display, Window parent, int x, int y, - unsigned int width, unsigned int height, - unsigned int border_width, unsigned long border, - unsigned long background); -int XNextEvent(Display *display, XEvent *event_return); -""") -lib = ffi.verify(""" -#include -""", libraries=['X11']) - -globals().update(lib.__dict__) class XError(Exception): pass def main(): - display = XOpenDisplay(ffi.NULL) + display = lib.XOpenDisplay(ffi.NULL) if display == ffi.NULL: raise XError("cannot open display") - w = XCreateSimpleWindow(display, DefaultRootWindow(display), + w = lib.XCreateSimpleWindow(display, lib.DefaultRootWindow(display), 10, 10, 500, 350, 0, 0, 0) - XMapRaised(display, w) + lib.XMapRaised(display, w) event = ffi.new("XEvent *") - XNextEvent(display, event) + lib.XNextEvent(display, event) if __name__ == '__main__': main() diff --git a/demo/xclient_build.py b/demo/xclient_build.py new file mode 100644 --- /dev/null +++ b/demo/xclient_build.py @@ -0,0 +1,25 @@ +from cffi import FFI +ffi = FFI() +ffi.cdef(""" + +typedef ... Display; +typedef struct { ...; } Window; + +typedef struct { int type; ...; } XEvent; + +Display *XOpenDisplay(char *display_name); +Window DefaultRootWindow(Display *display); +int XMapRaised(Display *display, Window w); +Window XCreateSimpleWindow(Display *display, Window parent, int x, int y, + unsigned int width, unsigned int height, + unsigned int border_width, unsigned long border, + unsigned long background); +int XNextEvent(Display *display, XEvent *event_return); +""") + +ffi.set_source('_xclient_cffi', """ + #include +""", libraries=['X11']) + +if __name__ == '__main__': + ffi.compile() From pypy.commits at gmail.com Thu Jan 7 17:05:38 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 14:05:38 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Fix ffi.compile() to automatically link with the python library Message-ID: <568ee132.ca56c20a.75c6b.3add@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2547:c3b68c2839d1 Date: 2016-01-07 18:20 +0100 http://bitbucket.org/cffi/cffi/changeset/c3b68c2839d1/ Log: Fix ffi.compile() to automatically link with the python library diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -535,6 +535,20 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + libraries = kwds.get('libraries', []) + if pythonlib not in libraries: + kwds['libraries'] = libraries + [pythonlib] + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1365,6 +1365,8 @@ if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + if ffi._embedding_init_code is not None: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) diff --git a/demo/embedding.py b/demo/embedding.py --- a/demo/embedding.py +++ b/demo/embedding.py @@ -18,12 +18,6 @@ return x + y """) -ffi.set_source("_embedding_cffi", """ -""") +ffi.set_source("_embedding_cffi", "") -#ffi.compile() -- should be fixed to do the right thing - -ffi.emit_c_code('_embedding_cffi.c') -# then call the compiler manually with the proper options, like: -# gcc -shared -fPIC _embedding_cffi.c -o _embedding_cffi.so -lpython2.7 -# -I/usr/include/python2.7 +ffi.compile(verbose=True) From pypy.commits at gmail.com Thu Jan 7 17:05:40 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 14:05:40 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: In-progress: fix tests to attempt to use distutils also for compiling Message-ID: <568ee134.8673c20a.386b4.ffffe44d@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2548:2d42a1e6d060 Date: 2016-01-07 23:05 +0100 http://bitbucket.org/cffi/cffi/changeset/2d42a1e6d060/ Log: In-progress: fix tests to attempt to use distutils also for compiling the executable diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -537,14 +537,17 @@ def _apply_embedding_fix(self, kwds): # must include an argument like "-lpython2.7" for the compiler - if sys.platform == "win32": - template = "python%d%d" - if sys.flags.debug: - template = template + '_d' + if '__pypy__' in sys.builtin_module_names: + pythonlib = "pypy-c" else: - template = "python%d.%d" - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) libraries = kwds.get('libraries', []) if pythonlib not in libraries: kwds['libraries'] = libraries + [pythonlib] diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py --- a/testing/embedding/add1.py +++ b/testing/embedding/add1.py @@ -28,4 +28,5 @@ ffi.set_source("_add1_cffi", """ """) -ffi.compile(verbose=True) +fn = ffi.compile(verbose=True) +print 'FILENAME:', fn diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py --- a/testing/embedding/add2.py +++ b/testing/embedding/add2.py @@ -24,4 +24,5 @@ ffi.set_source("_add2_cffi", """ """) -ffi.compile() +fn = ffi.compile(verbose=True) +print 'FILENAME:', fn diff --git a/testing/embedding/add3.py b/testing/embedding/add3.py --- a/testing/embedding/add3.py +++ b/testing/embedding/add3.py @@ -19,4 +19,5 @@ ffi.set_source("_add3_cffi", """ """) -ffi.compile() +fn = ffi.compile(verbose=True) +print 'FILENAME:', fn diff --git a/testing/embedding/add_recursive.py b/testing/embedding/add_recursive.py --- a/testing/embedding/add_recursive.py +++ b/testing/embedding/add_recursive.py @@ -24,4 +24,5 @@ int (*my_callback)(int); """) -ffi.compile() +fn = ffi.compile(verbose=True) +print 'FILENAME:', fn diff --git a/testing/embedding/perf.py b/testing/embedding/perf.py --- a/testing/embedding/perf.py +++ b/testing/embedding/perf.py @@ -17,4 +17,5 @@ ffi.set_source("_perf_cffi", """ """) -ffi.compile(verbose=True) +fn = ffi.compile(verbose=True) +print 'FILENAME:', fn diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -1,24 +1,30 @@ import py -import sys, os -import shutil, subprocess +import sys, os, re +import shutil, subprocess, time from testing.udir import udir local_dir = os.path.dirname(os.path.abspath(__file__)) class EmbeddingTests: - _compiled_modules = set() + _compiled_modules = {} + + def setup_method(self, meth): + self._path = udir.join('embedding', meth.__name__) def get_path(self): - return str(udir.ensure('embedding', dir=True)) + return str(self._path.ensure(dir=1)) def _run(self, args, env=None): print(args) - popen = subprocess.Popen(args, env=env, cwd=self.get_path()) + popen = subprocess.Popen(args, env=env, cwd=self.get_path(), stdout=subprocess.PIPE) + output = popen.stdout.read() err = popen.wait() if err: raise OSError("popen failed with exit code %r: %r" % ( err, args)) + print(output.rstrip()) + return output def prepare_module(self, name): if name not in self._compiled_modules: @@ -27,71 +33,42 @@ # NOTE: if you have an .egg globally installed with an older # version of cffi, this will not work, because sys.path ends # up with the .egg before the PYTHONPATH entries. I didn't - # find a solution to that: we can hack sys.path inside the + # find a solution to that: we could hack sys.path inside the # script run here, but we can't hack it in the same way in # execute(). env = os.environ.copy() env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) - self._run([sys.executable, os.path.join(local_dir, filename)], - env=env) - self._compiled_modules.add(name) + output = self._run([sys.executable, os.path.join(local_dir, filename)], + env=env) + match = re.compile(r"\bFILENAME: (.+)").search(output) + assert match + dynamic_lib_name = match.group(1) + self._compiled_modules[name] = dynamic_lib_name + return self._compiled_modules[name] - def compile(self, name, modules, **flags): + def compile(self, name, modules, opt=False, threads=False, defines={}): path = self.get_path() filename = '%s.c' % name shutil.copy(os.path.join(local_dir, filename), path) - if sys.platform.startswith('linux'): - self._compile_linux(name, modules, **flags) - elif sys.platform.startswith('win'): - self._compile_win(name, modules, **flags) - else: - py.test.skip("don't know how to invoke the C compiler on %r" % - (sys.platform,)) - - def _compile_linux(self, name, modules, - opt=False, threads=False, defines={}): - path = self.get_path() - filename = '%s.c' % name - if 'CC' in os.environ: - args = os.environ['CC'].split() - else: - args = ['gcc'] - if 'CFLAGS' in os.environ: - args.extend(os.environ['CFLAGS'].split()) - if 'LDFLAGS' in os.environ: - args.extend(os.environ['LDFLAGS'].split()) - if threads: - args.append('-pthread') - if opt: - args.append('-O2') - args.extend(['-g', filename, '-o', name, '-L.']) - if '__pypy__' in sys.builtin_module_names: - # xxx a bit hackish, maybe ffi.compile() should do a better job - executable = os.path.abspath(sys.executable) - libpypy_c = os.path.join(os.path.dirname(executable), - 'libpypy-c.so') - try: - os.symlink(libpypy_c, os.path.join(path, 'libpypy-c.so')) - except OSError: - pass - args.extend(['%s.pypy-26.so' % modname for modname in modules]) - args.append('-lpypy-c') - else: - args.extend(['%s.so' % modname for modname in modules]) - args.append('-lpython2.7') - args.append('-Wl,-rpath=$ORIGIN/') - for key, value in sorted(defines.items()): - args.append('-D%s=%s' % (key, value)) - self._run(args) - - def _compile_win(self, name, modules, - opt=False, threads=False, defines={}): - xxxx + import distutils.ccompiler + curdir = os.getcwd() + try: + os.chdir(self.get_path()) + c = distutils.ccompiler.new_compiler() + print('compiling %s with %r' % (name, modules)) + extra_preargs = [] + if threads and sys.platform != 'win32': + extra_preargs.append('-pthread') + objects = c.compile([filename], macros=sorted(defines.items()), debug=True) + c.link_executable(objects + modules, name, extra_preargs=extra_preargs) + finally: + os.chdir(curdir) def execute(self, name): path = self.get_path() env = os.environ.copy() env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + env['LD_LIBRARY_PATH'] = path print 'running %r in %r' % (name, path) popen = subprocess.Popen([name], cwd=path, env=env, stdout=subprocess.PIPE) @@ -104,8 +81,8 @@ class TestBasic(EmbeddingTests): def test_basic(self): - self.prepare_module('add1') - self.compile('add1-test', ['_add1_cffi']) + add1_cffi = self.prepare_module('add1') + self.compile('add1-test', [add1_cffi]) output = self.execute('add1-test') assert output == ("preparing...\n" "adding 40 and 2\n" @@ -113,9 +90,9 @@ "got: 42 95\n") def test_two_modules(self): - self.prepare_module('add1') - self.prepare_module('add2') - self.compile('add2-test', ['_add1_cffi', '_add2_cffi']) + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('add2-test', [add1_cffi, add2_cffi]) output = self.execute('add2-test') assert output == ("preparing...\n" "adding 40 and 2\n" diff --git a/testing/embedding/test_performance.py b/testing/embedding/test_performance.py --- a/testing/embedding/test_performance.py +++ b/testing/embedding/test_performance.py @@ -3,16 +3,16 @@ class TestPerformance(EmbeddingTests): def test_perf_single_threaded(self): - self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], opt=True) + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True) output = self.execute('perf-test') print '='*79 print output.rstrip() print '='*79 def test_perf_in_1_thread(self): - self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], opt=True, threads=True, + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, defines={'PTEST_USE_THREAD': '1'}) output = self.execute('perf-test') print '='*79 @@ -20,8 +20,8 @@ print '='*79 def test_perf_in_2_threads(self): - self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], opt=True, threads=True, + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, defines={'PTEST_USE_THREAD': '2'}) output = self.execute('perf-test') print '='*79 @@ -29,8 +29,8 @@ print '='*79 def test_perf_in_4_threads(self): - self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], opt=True, threads=True, + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, defines={'PTEST_USE_THREAD': '4'}) output = self.execute('perf-test') print '='*79 @@ -38,8 +38,8 @@ print '='*79 def test_perf_in_8_threads(self): - self.prepare_module('perf') - self.compile('perf-test', ['_perf_cffi'], opt=True, threads=True, + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, defines={'PTEST_USE_THREAD': '8'}) output = self.execute('perf-test') print '='*79 diff --git a/testing/embedding/test_recursive.py b/testing/embedding/test_recursive.py --- a/testing/embedding/test_recursive.py +++ b/testing/embedding/test_recursive.py @@ -3,8 +3,8 @@ class TestRecursive(EmbeddingTests): def test_recursive(self): - self.prepare_module('add_recursive') - self.compile('add_recursive-test', ['_add_recursive_cffi']) + add_recursive_cffi = self.prepare_module('add_recursive') + self.compile('add_recursive-test', [add_recursive_cffi]) output = self.execute('add_recursive-test') assert output == ("preparing REC\n" "some_callback(400)\n" diff --git a/testing/embedding/test_thread.py b/testing/embedding/test_thread.py --- a/testing/embedding/test_thread.py +++ b/testing/embedding/test_thread.py @@ -3,8 +3,8 @@ class TestThread(EmbeddingTests): def test_first_calls_in_parallel(self): - self.prepare_module('add1') - self.compile('thread1-test', ['_add1_cffi'], threads=True) + add1_cffi = self.prepare_module('add1') + self.compile('thread1-test', [add1_cffi], threads=True) for i in range(50): output = self.execute('thread1-test') assert output == ("starting\n" @@ -18,9 +18,9 @@ return text[:i] + text[i+len(content):] def test_init_different_modules_in_different_threads(self): - self.prepare_module('add1') - self.prepare_module('add2') - self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], threads=True) + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('thread2-test', [add1_cffi, add2_cffi], threads=True) output = self.execute('thread2-test') output = self._take_out(output, "preparing") output = self._take_out(output, ".") @@ -34,9 +34,9 @@ "done\n") def test_alt_issue(self): - self.prepare_module('add1') - self.prepare_module('add2') - self.compile('thread2-test', ['_add1_cffi', '_add2_cffi'], + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('thread2-test', [add1_cffi, add2_cffi], threads=True, defines={'T2TEST_AGAIN_ADD1': '1'}) output = self.execute('thread2-test') output = self._take_out(output, "adding 40 and 2\n") @@ -48,9 +48,9 @@ "done\n") def test_load_in_parallel_more(self): - self.prepare_module('add2') - self.prepare_module('add3') - self.compile('thread3-test', ['_add2_cffi', '_add3_cffi'], threads=True) + add2_cffi = self.prepare_module('add2') + add3_cffi = self.prepare_module('add3') + self.compile('thread3-test', [add2_cffi, add3_cffi], threads=True) for i in range(150): output = self.execute('thread3-test') for j in range(10): diff --git a/testing/embedding/test_tlocal.py b/testing/embedding/test_tlocal.py --- a/testing/embedding/test_tlocal.py +++ b/testing/embedding/test_tlocal.py @@ -3,8 +3,8 @@ class TestThreadLocal(EmbeddingTests): def test_thread_local(self): - self.prepare_module('tlocal') - self.compile('tlocal-test', ['_tlocal_cffi'], threads=True) + tlocal_cffi = self.prepare_module('tlocal') + self.compile('tlocal-test', [tlocal_cffi], threads=True) for i in range(10): output = self.execute('tlocal-test') assert output == "done\n" diff --git a/testing/embedding/tlocal.py b/testing/embedding/tlocal.py --- a/testing/embedding/tlocal.py +++ b/testing/embedding/tlocal.py @@ -24,4 +24,5 @@ ffi.set_source("_tlocal_cffi", """ """) -ffi.compile(verbose=True) +fn = ffi.compile(verbose=True) +print 'FILENAME:', fn From pypy.commits at gmail.com Thu Jan 7 18:38:17 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 07 Jan 2016 15:38:17 -0800 (PST) Subject: [pypy-commit] pypy default: review the skips in test_optimizebasic Message-ID: <568ef6e9.ca061c0a.cd9b4.4816@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81611:7a618a8a60ec Date: 2016-01-08 00:37 +0100 http://bitbucket.org/pypy/pypy/changeset/7a618a8a60ec/ Log: review the skips in test_optimizebasic - killed the really old tests that relied on optimizefindnode machinery - fixed and enabled a few - the remaining ones look actually useful diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -696,58 +696,6 @@ # ---------- - def test_virtual_1(self): - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - setfield_gc(p0, i1, descr=valuedescr) - jump(i, p0) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_float(self): - ops = """ - [f, p0] - f0 = getfield_gc(p0, descr=floatdescr) - f1 = float_add(f0, f) - setfield_gc(p0, f1, descr=floatdescr) - jump(f, p0) - """ - expected = """ - [f, f2] - f1 = float_add(f2, f) - jump(f, f1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected) - - def test_virtual_2(self): - py.test.skip("XXX") - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - p1 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p1, i1, descr=valuedescr) - jump(i, p1) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_oois(self): ops = """ [p0, p1, p2] @@ -774,20 +722,6 @@ guard_false(i12) [] jump(p0, p1, p2) """ - expected = """ - [p2] - # all constant-folded :-) - jump(p2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable), - Virtual(node_vtable), - Not''', - expected) - # - # to be complete, we also check the no-opt case where most comparisons - # are not removed. The exact set of comparisons removed depends on - # the details of the algorithm... expected2 = """ [p0, p1, p2] guard_nonnull(p0) [] @@ -801,26 +735,6 @@ """ self.optimize_loop(ops, expected2) - def test_virtual_default_field(self): - py.test.skip("XXX") - ops = """ - [p0] - i0 = getfield_gc(p0, descr=valuedescr) - guard_value(i0, 0) [] - p1 = new_with_vtable(ConstClass(node_vtable)) - # the field 'value' has its default value of 0 - jump(p1) - """ - expected = """ - [i] - guard_value(i, 0) [] - jump(0) - """ - # the 'expected' is sub-optimal, but it should be done by another later - # optimization step. See test_find_nodes_default_field() for why. - self.optimize_loop(ops, 'Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_3(self): ops = """ [i] @@ -837,55 +751,6 @@ """ self.optimize_loop(ops, expected) - def test_virtual_4(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - jump(i3, p1) - """ - expected = """ - [i0, i1] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_5(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p2 = new_with_vtable(descr=nodesize2) - setfield_gc(p2, i1, descr=valuedescr) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - setfield_gc(p1, p2, descr=nextdescr) - jump(i3, p1) - """ - expected = """ - [i0, i1, i1bis] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2, i1) - """ - self.optimize_loop(ops, - '''Not, Virtual(node_vtable, - valuedescr=Not, - nextdescr=Virtual(node_vtable2, - valuedescr=Not))''', - expected) - def test_virtual_constant_isnull(self): ops = """ [i0] @@ -1209,27 +1074,6 @@ """ self.optimize_loop(ops, expected) - def test_varray_2(self): - ops = """ - [i0, p1] - i1 = getarrayitem_gc(p1, 0, descr=arraydescr) - i2 = getarrayitem_gc(p1, 1, descr=arraydescr) - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - p2 = new_array(2, descr=arraydescr) - setarrayitem_gc(p2, 1, i0, descr=arraydescr) - setarrayitem_gc(p2, 0, 20, descr=arraydescr) - jump(i0, p2) - """ - expected = """ - [i0, i1, i2] - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - jump(i0, 20, i0) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr, Not, Not)', expected) - def test_p123_array(self): ops = """ [i1, p2, p3] @@ -1264,23 +1108,6 @@ """ self.optimize_loop(ops, expected) - def test_vstruct_1(self): - py.test.skip("XXX") - ops = """ - [i1, p2] - i2 = getfield_gc(p2, descr=adescr) - escape_n(i2) - p3 = new(descr=ssize) - setfield_gc(p3, i1, descr=adescr) - jump(i1, p3) - """ - expected = """ - [i1, i2] - escape_n(i2) - jump(i1, i1) - """ - self.optimize_loop(ops, 'Not, VStruct(ssize, adescr=Not)', expected) - def test_p123_vstruct(self): ops = """ [i1, p2, p3] @@ -1443,26 +1270,6 @@ """ self.optimize_loop(ops, expected) - def test_duplicate_getfield_guard_value_const(self): - ops = """ - [p1] - guard_value(p1, ConstPtr(myptr)) [] - i1 = getfield_gc_i(p1, descr=valuedescr) - i2 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i2) - jump(p1) - """ - expected = """ - [] - i1 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i1) - jump() - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr)', expected) - def test_duplicate_getfield_sideeffects_1(self): ops = """ [p1] @@ -1688,12 +1495,12 @@ jump(p1, i1, i2) """ expected = """ - [i1, i2] + [p1, i1, i2] + guard_value(p1, ConstPtr(myptr)) [] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) - jump(i1, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr), Not, Not', expected) + jump(ConstPtr(myptr), i1, i2) + """ + self.optimize_loop(ops, expected) def test_duplicate_getarrayitem_1(self): ops = """ @@ -1870,163 +1677,7 @@ """ self.optimize_loop(ops, expected) - def test_bug_1(self): - ops = """ - [i0, p1] - p4 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_with_vtable(descr=nodesize) - p3 = escape_r() - setfield_gc(p2, p3, descr=nextdescr) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, nextdescr=Not)', - expected) - - def test_bug_2(self): - ops = """ - [i0, p1] - p4 = getarrayitem_gc(p1, 0, descr=arraydescr2) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_array(1, descr=arraydescr2) - p3 = escape_r() - setarrayitem_gc(p2, 0, p3, descr=arraydescr2) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr2, Not)', - expected) - - def test_bug_3(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - p2a = new_with_vtable(descr=nodesize) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_bug_3bis(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_invalid_loop_1(self): - ops = """ - [p1] - guard_isnull(p1) [] - # - p2 = new_with_vtable(descr=nodesize) - jump(p2) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, - ops, 'Virtual(node_vtable)', None) - - def test_invalid_loop_2(self): - py.test.skip("this would fail if we had Fixed again in the specnodes") - ops = """ - [p1] - guard_class(p1, ConstClass(node_vtable2)) [] - # - p2 = new_with_vtable(descr=nodesize) - escape_n(p2) # prevent it from staying Virtual - jump(p2) - """ - py.test.raises(InvalidLoop, self.optimize_loop, - ops, '...', None) - - def test_invalid_loop_3(self): - ops = """ - [p1] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_isnull(p2) [] - # - p3 = new_with_vtable(descr=nodesize) - p4 = new_with_vtable(descr=nodesize) - setfield_gc(p3, p4, descr=nextdescr) - jump(p3) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, ops, - 'Virtual(node_vtable, nextdescr=Virtual(node_vtable))', - None) - def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -2060,7 +1711,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2078,7 +1728,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2625,26 +2274,6 @@ where p2 is a node_vtable, valuedescr=i2, nextdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_6(self): - ops = """ - [p0, i0, i1] - guard_true(i0) [p0] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i1, i1) - """ - expected = """ - [i1b, i0, i1] - guard_true(i0) [i1b] - jump(i1, i1, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable, valuedescr=Not), - Not, Not''', expected) - self.check_expanded_fail_descr('''p0 - where p0 is a node_vtable, valuedescr=i1b - ''', rop.GUARD_TRUE) - def test_expand_fail_varray(self): ops = """ [i1] @@ -2686,47 +2315,6 @@ where p2 is a vstruct ssize, adescr=i1, bdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_v_all_1(self): - ops = """ - [i1, p1a, i2] - p6s = getarrayitem_gc(p1a, 0, descr=arraydescr2) - p7v = getfield_gc_r(p6s, descr=bdescr) - p5s = new(descr=ssize) - setfield_gc(p5s, i2, descr=adescr) - setfield_gc(p5s, p7v, descr=bdescr) - setarrayitem_gc(p1a, 1, p5s, descr=arraydescr2) - guard_true(i1) [p1a] - p2s = new(descr=ssize) - p3v = new_with_vtable(descr=nodesize) - p4a = new_array(2, descr=arraydescr2) - setfield_gc(p2s, i1, descr=adescr) - setfield_gc(p2s, p3v, descr=bdescr) - setfield_gc(p3v, i2, descr=valuedescr) - setarrayitem_gc(p4a, 0, p2s, descr=arraydescr2) - jump(i1, p4a, i2) - """ - expected = """ - [i1, ia, iv, pnull, i2] - guard_true(i1) [ia, iv, i2] - jump(1, 1, i2, NULL, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, ''' - Not, - VArray(arraydescr2, - VStruct(ssize, - adescr=Not, - bdescr=Virtual(node_vtable, - valuedescr=Not)), - Not), - Not''', expected) - self.check_expanded_fail_descr('''p1a - where p1a is a varray arraydescr2: p6s, p5s - where p6s is a vstruct ssize, adescr=ia, bdescr=p7v - where p5s is a vstruct ssize, adescr=i2, bdescr=p7v - where p7v is a node_vtable, valuedescr=iv - ''', rop.GUARD_TRUE) - def test_expand_fail_lazy_setfield_1(self): ops = """ [p1, i2, i3] @@ -5179,6 +4767,8 @@ """ self.optimize_loop(ops, expected) + def test_intmod_bounds_harder(self): + py.test.skip("harder") # Of course any 'maybe-negative % power-of-two' can be turned into # int_and(), but that's a bit harder to detect here because it turns # into several operations, and of course it is wrong to just turn @@ -5196,7 +4786,6 @@ i4 = int_and(i0, 15) finish(i4) """ - py.test.skip("harder") self.optimize_loop(ops, expected) def test_intmod_bounds_bug1(self): @@ -5357,7 +4946,7 @@ i5 = int_lt(i2, i1) guard_true(i5) [] - i6 = getarrayitem_gc(p0, i2) + i6 = getarrayitem_gc_i(p0, i2, descr=chararraydescr) finish(i6) """ expected = """ @@ -5368,7 +4957,7 @@ i4 = int_lt(i2, i0) guard_true(i4) [] - i6 = getarrayitem_gc(p0, i3) + i6 = getarrayitem_gc_i(p0, i3, descr=chararraydescr) finish(i6) """ self.optimize_loop(ops, expected) From pypy.commits at gmail.com Thu Jan 7 18:51:04 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 07 Jan 2016 15:51:04 -0800 (PST) Subject: [pypy-commit] pypy default: those just pass Message-ID: <568ef9e8.a5c9c20a.6cfe5.ffffccbe@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81612:22b851590fb0 Date: 2016-01-08 00:49 +0100 http://bitbucket.org/pypy/pypy/changeset/22b851590fb0/ Log: those just pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -2969,7 +2969,6 @@ assert "promote of a virtual" in exc.msg def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -3015,7 +3014,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -3039,7 +3037,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] From pypy.commits at gmail.com Fri Jan 8 01:59:05 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 22:59:05 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: fix Message-ID: <568f5e39.465fc20a.c35be.ffffab54@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2549:2db0bf58185a Date: 2016-01-08 07:58 +0100 http://bitbucket.org/cffi/cffi/changeset/2db0bf58185a/ Log: fix diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -68,7 +68,12 @@ path = self.get_path() env = os.environ.copy() env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) - env['LD_LIBRARY_PATH'] = path + libpath = env.get('LD_LIBRARY_PATH') + if libpath: + libpath = path + ':' + libpath + else: + libpath = path + env['LD_LIBRARY_PATH'] = libpath print 'running %r in %r' % (name, path) popen = subprocess.Popen([name], cwd=path, env=env, stdout=subprocess.PIPE) From pypy.commits at gmail.com Fri Jan 8 02:17:44 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 23:17:44 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Py3 fixes. Skip the embedding tests if linking with -lpython%d.%d Message-ID: <568f6298.6650c20a.a11d6.5253@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2550:e0a4793589e0 Date: 2016-01-08 08:17 +0100 http://bitbucket.org/cffi/cffi/changeset/e0a4793589e0/ Log: Py3 fixes. Skip the embedding tests if linking with -lpython%d.%d fails (likely, the Python was not compiled with --enable-shared) diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -176,7 +176,11 @@ if (PyDict_SetItemString(global_dict, "__builtins__", PyThreadState_GET()->interp->builtins) < 0) goto error; - x = PyEval_EvalCode((PyCodeObject *)pycode, global_dict, global_dict); + x = PyEval_EvalCode( +#if PY_MAJOR_VERSION < 3 + (PyCodeObject *) +#endif + pycode, global_dict, global_dict); if (x == NULL) goto error; Py_DECREF(x); diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py --- a/testing/embedding/add1.py +++ b/testing/embedding/add1.py @@ -29,4 +29,4 @@ """) fn = ffi.compile(verbose=True) -print 'FILENAME:', fn +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py --- a/testing/embedding/add2.py +++ b/testing/embedding/add2.py @@ -25,4 +25,4 @@ """) fn = ffi.compile(verbose=True) -print 'FILENAME:', fn +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/add3.py b/testing/embedding/add3.py --- a/testing/embedding/add3.py +++ b/testing/embedding/add3.py @@ -20,4 +20,4 @@ """) fn = ffi.compile(verbose=True) -print 'FILENAME:', fn +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/add_recursive.py b/testing/embedding/add_recursive.py --- a/testing/embedding/add_recursive.py +++ b/testing/embedding/add_recursive.py @@ -25,4 +25,4 @@ """) fn = ffi.compile(verbose=True) -print 'FILENAME:', fn +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -2,14 +2,33 @@ import sys, os, re import shutil, subprocess, time from testing.udir import udir +import cffi local_dir = os.path.dirname(os.path.abspath(__file__)) +_link_error = '?' + +def check_lib_python_found(tmpdir): + global _link_error + if _link_error == '?': + ffi = cffi.FFI() + kwds = {} + ffi._apply_embedding_fix(kwds) + ffi.set_source("_test_lib_python_found", "", **kwds) + try: + ffi.compile(tmpdir=tmpdir) + except cffi.VerificationError as e: + _link_error = e + else: + _link_error = None + if _link_error: + py.test.skip(str(_link_error)) class EmbeddingTests: _compiled_modules = {} def setup_method(self, meth): + check_lib_python_found(str(udir.ensure('embedding', dir=1))) self._path = udir.join('embedding', meth.__name__) def get_path(self): @@ -74,7 +93,7 @@ else: libpath = path env['LD_LIBRARY_PATH'] = libpath - print 'running %r in %r' % (name, path) + print('running %r in %r' % (name, path)) popen = subprocess.Popen([name], cwd=path, env=env, stdout=subprocess.PIPE) result = popen.stdout.read() diff --git a/testing/embedding/test_performance.py b/testing/embedding/test_performance.py --- a/testing/embedding/test_performance.py +++ b/testing/embedding/test_performance.py @@ -6,42 +6,42 @@ perf_cffi = self.prepare_module('perf') self.compile('perf-test', [perf_cffi], opt=True) output = self.execute('perf-test') - print '='*79 - print output.rstrip() - print '='*79 + print('='*79) + print(output.rstrip()) + print('='*79) def test_perf_in_1_thread(self): perf_cffi = self.prepare_module('perf') self.compile('perf-test', [perf_cffi], opt=True, threads=True, defines={'PTEST_USE_THREAD': '1'}) output = self.execute('perf-test') - print '='*79 - print output.rstrip() - print '='*79 + print('='*79) + print(output.rstrip()) + print('='*79) def test_perf_in_2_threads(self): perf_cffi = self.prepare_module('perf') self.compile('perf-test', [perf_cffi], opt=True, threads=True, defines={'PTEST_USE_THREAD': '2'}) output = self.execute('perf-test') - print '='*79 - print output.rstrip() - print '='*79 + print('='*79) + print(output.rstrip()) + print('='*79) def test_perf_in_4_threads(self): perf_cffi = self.prepare_module('perf') self.compile('perf-test', [perf_cffi], opt=True, threads=True, defines={'PTEST_USE_THREAD': '4'}) output = self.execute('perf-test') - print '='*79 - print output.rstrip() - print '='*79 + print('='*79) + print(output.rstrip()) + print('='*79) def test_perf_in_8_threads(self): perf_cffi = self.prepare_module('perf') self.compile('perf-test', [perf_cffi], opt=True, threads=True, defines={'PTEST_USE_THREAD': '8'}) output = self.execute('perf-test') - print '='*79 - print output.rstrip() - print '='*79 + print('='*79) + print(output.rstrip()) + print('='*79) diff --git a/testing/embedding/tlocal.py b/testing/embedding/tlocal.py --- a/testing/embedding/tlocal.py +++ b/testing/embedding/tlocal.py @@ -25,4 +25,4 @@ """) fn = ffi.compile(verbose=True) -print 'FILENAME:', fn +print('FILENAME: %s' % (fn,)) From pypy.commits at gmail.com Fri Jan 8 02:42:13 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 23:42:13 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Py3 fix Message-ID: <568f6855.2851c20a.ab8b3.5835@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2551:72802dce3d25 Date: 2016-01-08 08:41 +0100 http://bitbucket.org/cffi/cffi/changeset/72802dce3d25/ Log: Py3 fix diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -548,6 +548,8 @@ template = "python%d.%d" pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags libraries = kwds.get('libraries', []) if pythonlib not in libraries: kwds['libraries'] = libraries + [pythonlib] From pypy.commits at gmail.com Fri Jan 8 02:50:55 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 07 Jan 2016 23:50:55 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Py3 fix Message-ID: <568f6a5f.863f1c0a.50e5b.ffffb1cb@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2552:249be3c875c8 Date: 2016-01-08 08:47 +0100 http://bitbucket.org/cffi/cffi/changeset/249be3c875c8/ Log: Py3 fix diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -36,7 +36,9 @@ def _run(self, args, env=None): print(args) - popen = subprocess.Popen(args, env=env, cwd=self.get_path(), stdout=subprocess.PIPE) + popen = subprocess.Popen(args, env=env, cwd=self.get_path(), + stdout=subprocess.PIPE, + universal_newlines=True) output = popen.stdout.read() err = popen.wait() if err: From pypy.commits at gmail.com Fri Jan 8 02:53:31 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 07 Jan 2016 23:53:31 -0800 (PST) Subject: [pypy-commit] pypy osx-vmprof-support: close branch likely not going anywhere (different approach ahead) Message-ID: <568f6afb.44e21c0a.897ff.ffffb5b1@mx.google.com> Author: fijal Branch: osx-vmprof-support Changeset: r81614:de58d64bcb59 Date: 2016-01-08 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/de58d64bcb59/ Log: close branch likely not going anywhere (different approach ahead) From pypy.commits at gmail.com Fri Jan 8 02:53:34 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 07 Jan 2016 23:53:34 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: merge default Message-ID: <568f6afe.0f811c0a.ae6fe.ffffbb1b@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81615:58ef1d3f7f09 Date: 2016-01-08 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/58ef1d3f7f09/ Log: merge default diff too long, truncating to 2000 out of 34712 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -17,3 +17,4 @@ 295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0 f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 +5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at @@ -56,14 +56,15 @@ Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen + Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen Lukas Diekmann Sven Hager Anders Lehmann + Remi Meier Aurelien Campeas - Remi Meier Niklaus Haldimann Camillo Bruni Laura Creighton @@ -87,7 +88,6 @@ Ludovic Aubry Jacob Hallen Jason Creighton - Richard Plangger Alex Martelli Michal Bendowski stian @@ -200,9 +200,12 @@ Alex Perry Vincent Legoll Alan McIntyre + Spenser Bauman Alexander Sedov Attila Gobi Christopher Pope + Devin Jeanpierre + Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg @@ -234,6 +237,7 @@ Lutz Paelike Lucio Torre Lars Wassermann + Philipp Rustemeuer Henrik Vendelbo Dan Buch Miguel de Val Borro @@ -244,6 +248,7 @@ Martin Blais Lene Wagner Tomo Cocoa + Kim Jin Su Toni Mattis Lucas Stadler Julian Berman @@ -253,6 +258,7 @@ Anna Katrina Dominguez William Leslie Bobby Impollonia + Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara @@ -283,6 +289,7 @@ shoma hosaka Daniel Neuhäuser Ben Mather + Niclas Olofsson halgari Boglarka Vezer Chris Pressey @@ -309,13 +316,16 @@ Stefan Marr jiaaro Mads Kiilerich + Richard Lancaster opassembler.py Antony Lee + Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer Even Wiik Thomassen jbs squeaky + Zearin soareschen Kurt Griffiths Mike Bayer @@ -327,6 +337,7 @@ Anna Ravencroft Andrey Churin Dan Crosta + Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ -all: pypy-c +all: pypy-c cffi_imports PYPY_EXECUTABLE := $(shell which pypy) URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") @@ -10,6 +10,8 @@ RUNINTERP = $(PYPY_EXECUTABLE) endif +.PHONY: cffi_imports + pypy-c: @echo @echo "====================================================================" @@ -36,3 +38,6 @@ # replaced with an opaque --jobserver option by the time this Makefile # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html + +cffi_imports: + PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -18,9 +18,9 @@ assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} try: - from __pypy__ import reversed_dict + from __pypy__ import reversed_dict as _reversed_dict except ImportError: - reversed_dict = lambda d: reversed(d.keys()) + _reversed_dict = None # don't have ordered dicts try: from thread import get_ident as _get_ident @@ -46,7 +46,7 @@ ''' def __reversed__(self): - return reversed_dict(self) + return _reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. @@ -116,6 +116,178 @@ return ItemsView(self) +def _compat_with_unordered_dicts(): + # This returns the methods needed in OrderedDict in case the base + # 'dict' class is not actually ordered, like on top of CPython or + # old PyPy or PyPy-STM. + + # ===== Original comments and code follows ===== + # ===== The unmodified methods are not repeated ===== + + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + return dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, _ = self.__map.pop(key) + link_prev[1] = link_next # update link_prev[NEXT] + link_next[0] = link_prev # update link_next[PREV] + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root[1] # start at the first node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[1] # move to next node + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root[0] # start at the last node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[0] # move to previous node + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + dict.clear(self) + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) pairs in od' + for k in self: + yield (k, self[k]) + + update = MutableMapping.update + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + key = next(reversed(self) if last else iter(self)) + value = self.pop(key) + return key, value + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + return locals() + +if _reversed_dict is None: + for _key, _value in _compat_with_unordered_dicts().items(): + setattr(OrderedDict, _key, _value) + del _key, _value + ################################################################################ ### namedtuple ################################################################################ diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -8,13 +8,13 @@ def __init__(self): self._builder = StringBuilder() def append(self, string): - try: - self._builder.append(string) - except UnicodeEncodeError: + if (isinstance(string, unicode) and + type(self._builder) is StringBuilder): ub = UnicodeBuilder() ub.append(self._builder.build()) self._builder = ub - ub.append(string) + self.append = ub.append # shortcut only + self._builder.append(string) def build(self): return self._builder.build() diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -604,21 +604,8 @@ def uuid4(): """Generate a random UUID.""" - - # When the system provides a version-4 UUID generator, use it. - if _uuid_generate_random: - _buffer = ctypes.create_string_buffer(16) - _uuid_generate_random(_buffer) - return UUID(bytes=_buffer.raw) - - # Otherwise, get randomness from urandom or the 'random' module. - try: - import os - return UUID(bytes=os.urandom(16), version=4) - except: - import random - bytes = [chr(random.randrange(256)) for i in range(16)] - return UUID(bytes=bytes, version=4) + import os + return UUID(bytes=os.urandom(16), version=4) def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': @@ -559,6 +563,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +597,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.3.0 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.3.0" -__version_info__ = (1, 3, 0) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,7 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) -#define _CFFI_NUM_EXPORTS 25 +#define _cffi_call_python \ + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) +#define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -201,8 +203,11 @@ the others follow */ } +/********** end CPython-specific section **********/ +#else +_CFFI_UNUSED_FN +static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); #endif -/********** end CPython-specific section **********/ #define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -72,6 +72,8 @@ self._cdefsources = [] self._included_ffis = [] self._windows_unicode = None + self._init_once_cache = {} + self._cdef_version = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -104,6 +106,7 @@ raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: + self._cdef_version = object() self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: @@ -589,14 +592,39 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.'): + def compile(self, tmpdir='.', verbose=0): from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, **kwds) + source_extension=source_extension, + compiler_verbose=verbose, **kwds) + + def init_once(self, func, tag): + # Read _init_once_cache[tag], which is either (False, lock) if + # we're calling the function now in some thread, or (True, result). + # Don't call setdefault() in most cases, to avoid allocating and + # immediately freeing a lock; but still use setdefaut() to avoid + # races. + try: + x = self._init_once_cache[tag] + except KeyError: + x = self._init_once_cache.setdefault(tag, (False, allocate_lock())) + # Common case: we got (True, result), so we return the result. + if x[0]: + return x[1] + # Else, it's a lock. Acquire it to serialize the following tests. + with x[1]: + # Read again from _init_once_cache the current status. + x = self._init_once_cache[tag] + if x[0]: + return x[1] + # Call the function and store the result back. + result = func() + self._init_once_cache[tag] = (True, result) + return result def _load_backend_lib(backend, name, flags): @@ -620,70 +648,70 @@ import os backend = ffi._backend backendlib = _load_backend_lib(backend, libname, flags) - copied_enums = [] # - def make_accessor_locked(name): + def accessor_function(name): key = 'function ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - try: - value = backendlib.load_function(BType, name) - except KeyError as e: - raise AttributeError('%s: %s' % (name, e)) - library.__dict__[name] = value + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + try: + value = backendlib.load_function(BType, name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) + library.__dict__[name] = value + # + def accessor_variable(name): + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: return # - key = 'variable ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - read_variable = backendlib.read_variable - write_variable = backendlib.write_variable - setattr(FFILibrary, name, property( - lambda self: read_variable(BType, name), - lambda self, value: write_variable(BType, name, value))) - return - # - if not copied_enums: - from . import model - error = None - for key, (tp, _) in ffi._parser._declarations.items(): - if not isinstance(tp, model.EnumType): - continue - try: - tp.check_not_partial() - except Exception as e: - error = e - continue - for enumname, enumval in zip(tp.enumerators, tp.enumvalues): - if enumname not in library.__dict__: - library.__dict__[enumname] = enumval - if error is not None: - if name in library.__dict__: - return # ignore error, about a different enum - raise error - - for key, val in ffi._parser._int_constants.items(): - if key not in library.__dict__: - library.__dict__[key] = val - - copied_enums.append(True) - if name in library.__dict__: - return - # - key = 'constant ' + name - if key in ffi._parser._declarations: - raise NotImplementedError("fetching a non-integer constant " - "after dlopen()") - # - raise AttributeError(name) + from . import model + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, accessor_int_constant) + accessors_version[0] = ffi._cdef_version # def make_accessor(name): with ffi._lock: if name in library.__dict__ or name in FFILibrary.__dict__: return # added by another thread while waiting for the lock - make_accessor_locked(name) + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) # class FFILibrary(object): def __getattr__(self, name): @@ -697,6 +725,10 @@ setattr(self, name, value) else: property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() # if libname is not None: try: diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py --- a/lib_pypy/cffi/cffi_opcode.py +++ b/lib_pypy/cffi/cffi_opcode.py @@ -54,6 +54,7 @@ OP_DLOPEN_FUNC = 35 OP_DLOPEN_CONST = 37 OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 PRIM_VOID = 0 PRIM_BOOL = 1 diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -29,6 +29,7 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") +_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -62,7 +63,8 @@ if csource.startswith('*', endpos): parts.append('('); closing += ')' level = 0 - for i in xrange(endpos, len(csource)): + i = endpos + while i < len(csource): c = csource[i] if c == '(': level += 1 @@ -73,11 +75,53 @@ elif c in ',;=': if level == 0: break + i += 1 csource = csource[endpos:i] + closing + csource[i:] #print repr(''.join(parts)+csource) parts.append(csource) return ''.join(parts) +def _preprocess_extern_python(csource): + # input: `extern "Python" int foo(int);` or + # `extern "Python" { int foo(int); }` + # output: + # void __cffi_extern_python_start; + # int foo(int); + # void __cffi_extern_python_stop; + parts = [] + while True: + match = _r_extern_python.search(csource) + if not match: + break + endpos = match.end() - 1 + #print + #print ''.join(parts)+csource + #print '=>' + parts.append(csource[:match.start()]) + parts.append('void __cffi_extern_python_start; ') + if csource[endpos] == '{': + # grouping variant + closing = csource.find('}', endpos) + if closing < 0: + raise api.CDefError("'extern \"Python\" {': no '}' found") + if csource.find('{', endpos + 1, closing) >= 0: + raise NotImplementedError("cannot use { } inside a block " + "'extern \"Python\" { ... }'") + parts.append(csource[endpos+1:closing]) + csource = csource[closing+1:] + else: + # non-grouping variant + semicolon = csource.find(';', endpos) + if semicolon < 0: + raise api.CDefError("'extern \"Python\": no ';' found") + parts.append(csource[endpos:semicolon+1]) + csource = csource[semicolon+1:] + parts.append(' void __cffi_extern_python_stop;') + #print ''.join(parts)+csource + #print + parts.append(csource) + return ''.join(parts) + def _preprocess(csource): # Remove comments. NOTE: this only work because the cdef() section # should not contain any string literal! @@ -101,8 +145,13 @@ csource = _r_stdcall2.sub(' volatile volatile const(', csource) csource = _r_stdcall1.sub(' volatile volatile const ', csource) csource = _r_cdecl.sub(' ', csource) + # + # Replace `extern "Python"` with start/end markers + csource = _preprocess_extern_python(csource) + # # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) + # # Replace "...}" with "__dotdotdotNUM__}". This construction should # occur only at the end of enums; at the end of structs we have "...;}" # and at the end of vararg functions "...);". Also replace "=...[,}]" @@ -255,6 +304,7 @@ break # try: + self._inside_extern_python = False for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) @@ -324,13 +374,19 @@ ' #define %s %s' % (key, key, key, value)) + def _declare_function(self, tp, quals, decl): + tp = self._get_type_pointer(tp, quals) + if self._inside_extern_python: + self._declare('extern_python ' + decl.name, tp) + else: + self._declare('function ' + decl.name, tp) + def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp, quals) - self._declare('function ' + decl.name, tp) + self._declare_function(tp, quals, decl) else: if isinstance(node, pycparser.c_ast.Struct): self._get_struct_union_enum_type('struct', node) @@ -346,8 +402,7 @@ tp, quals = self._get_type_and_quals(node, partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp, quals) - self._declare('function ' + decl.name, tp) + self._declare_function(tp, quals, decl) elif (tp.is_integer_type() and hasattr(decl, 'init') and hasattr(decl.init, 'value') and @@ -360,10 +415,23 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif (quals & model.Q_CONST) and not tp.is_array_type: - self._declare('constant ' + decl.name, tp, quals=quals) + elif (tp is model.void_type and + decl.name.startswith('__cffi_extern_python_')): + # hack: `extern "Python"` in the C source is replaced + # with "void __cffi_extern_python_start;" and + # "void __cffi_extern_python_stop;" + self._inside_extern_python = not self._inside_extern_python + assert self._inside_extern_python == ( + decl.name == '__cffi_extern_python_start') else: - self._declare('variable ' + decl.name, tp, quals=quals) + if self._inside_extern_python: + raise api.CDefError( + "cannot declare constants or " + "variables with 'extern \"Python\"'") + if (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) + else: + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): return self.parse_type_and_quals(cdecl)[0] diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -17,15 +17,16 @@ def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] - allsources.extend(sources) + for src in sources: + allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext): +def compile(tmpdir, ext, compiler_verbose=0): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext) + outputfilename = _build(tmpdir, ext, compiler_verbose) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -35,10 +36,10 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext): +def _build(tmpdir, ext, compiler_verbose=0): # XXX compact but horrible :-( from distutils.core import Distribution - import distutils.errors + import distutils.errors, distutils.log # dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() @@ -48,7 +49,12 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: - dist.run_command('build_ext') + old_level = distutils.log.set_threshold(0) or 0 + try: + distutils.log.set_verbosity(compiler_verbose) + dist.run_command('build_ext') + finally: + distutils.log.set_threshold(old_level) except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -514,12 +514,17 @@ if self.baseinttype is not None: return self.baseinttype.get_cached_btype(ffi, finishlist) # + from . import api if self.enumvalues: smallest_value = min(self.enumvalues) largest_value = max(self.enumvalues) else: - smallest_value = 0 - largest_value = 0 + import warnings + warnings.warn("%r has no values explicitly defined; next version " + "will refuse to guess which integer type it is " + "meant to be (unsigned/signed, int/long)" + % self._get_c_name()) + smallest_value = largest_value = 0 if smallest_value < 0: # needs a signed type sign = 1 candidate1 = PrimitiveType("int") diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -1,5 +1,6 @@ -/* See doc/misc/parse_c_type.rst in the source of CFFI for more information */ +/* This part is from file 'cffi/parse_c_type.h'. It is copied at the + beginning of C sources generated by CFFI's ffi.set_source(). */ typedef void *_cffi_opcode_t; @@ -27,6 +28,7 @@ #define _CFFI_OP_DLOPEN_FUNC 35 #define _CFFI_OP_DLOPEN_CONST 37 #define _CFFI_OP_GLOBAL_VAR_F 39 +#define _CFFI_OP_EXTERN_PYTHON 41 #define _CFFI_PRIM_VOID 0 #define _CFFI_PRIM_BOOL 1 @@ -160,6 +162,12 @@ const char *error_message; }; +struct _cffi_externpy_s { + const char *name; + size_t size_of_result; + void *reserved1, *reserved2; +}; + #ifdef _CFFI_INTERNAL static int parse_c_type(struct _cffi_parse_info_s *info, const char *input); static int search_in_globals(const struct _cffi_type_context_s *ctx, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -118,6 +118,7 @@ class Recompiler: + _num_externpy = 0 def __init__(self, ffi, module_name, target_is_python=False): self.ffi = ffi @@ -356,7 +357,10 @@ else: prnt(' NULL, /* no includes */') prnt(' %d, /* num_types */' % (len(self.cffi_types),)) - prnt(' 0, /* flags */') + flags = 0 + if self._num_externpy: + flags |= 1 # set to mean that we use extern "Python" + prnt(' %d, /* flags */' % flags) prnt('};') prnt() # @@ -366,6 +370,11 @@ prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') + if self._num_externpy: + prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') + prnt(' _cffi_call_python = ' + '(void(*)(struct _cffi_externpy_s *, char *))p[1];') + prnt(' }') prnt(' p[0] = (const void *)%s;' % VERSION) prnt(' p[1] = &_cffi_type_context;') prnt('}') @@ -1108,6 +1117,75 @@ GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) # ---------- + # extern "Python" + + def _generate_cpy_extern_python_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + self._do_collect_type(tp) + + def _generate_cpy_extern_python_decl(self, tp, name): + prnt = self._prnt + if isinstance(tp.result, model.VoidType): + size_of_result = '0' + else: + context = 'result of %s' % name + size_of_result = '(int)sizeof(%s)' % ( + tp.result.get_c_name('', context),) + prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) + prnt(' { "%s", %s };' % (name, size_of_result)) + prnt() + # + arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' a%d' % i, context) + arguments.append(arg) + # + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s(%s)' % (name, repr_arguments) + # + def may_need_128_bits(tp): + return (isinstance(tp, model.PrimitiveType) and + tp.name == 'long double') + # + size_of_a = max(len(tp.args)*8, 8) + if may_need_128_bits(tp.result): + size_of_a = max(size_of_a, 16) + if isinstance(tp.result, model.StructOrUnion): + size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( + tp.result.get_c_name(''), size_of_a, + tp.result.get_c_name(''), size_of_a) + prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + prnt('{') + prnt(' char a[%s];' % size_of_a) + prnt(' char *p = a;') + for i, type in enumerate(tp.args): + arg = 'a%d' % i + if (isinstance(type, model.StructOrUnion) or + may_need_128_bits(type)): + arg = '&' + arg + type = model.PointerType(type) + prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) + prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) + if not isinstance(tp.result, model.VoidType): + prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) + prnt('}') + prnt() + self._num_externpy += 1 + + def _generate_cpy_extern_python_ctx(self, tp, name): + if self.target_is_python: + raise ffiplatform.VerificationError( + "cannot use 'extern \"Python\"' in the ABI mode") + if tp.ellipsis: + raise NotImplementedError("a vararg function is extern \"Python\"") + type_index = self._typesdict[tp] + type_op = CffiOp(OP_EXTERN_PYTHON, type_index) + self._lsts["global"].append( + GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + + # ---------- # emitting the opcodes for individual types def _emit_bytecode_VoidType(self, tp, index): @@ -1232,7 +1310,8 @@ return os.path.join(outputdir, *parts), parts def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, - c_file=None, source_extension='.c', extradir=None, **kwds): + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1252,7 +1331,7 @@ cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose) finally: os.chdir(cwd) return outputfilename diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -21,6 +21,8 @@ import math as _math import struct as _struct +_SENTINEL = object() + def _cmp(x, y): return 0 if x == y else 1 if x > y else -1 @@ -31,6 +33,8 @@ MAXYEAR = 9999 _MINYEARFMT = 1900 +_MAX_DELTA_DAYS = 999999999 + # Utility functions, adapted from Python's Demo/classes/Dates.py, which # also assumes the current Gregorian calendar indefinitely extended in # both directions. Difference: Dates.py calls January 1 of year 0 day @@ -95,6 +99,15 @@ # pasting together 25 4-year cycles. assert _DI100Y == 25 * _DI4Y - 1 +_US_PER_US = 1 +_US_PER_MS = 1000 +_US_PER_SECOND = 1000000 +_US_PER_MINUTE = 60000000 +_SECONDS_PER_DAY = 24 * 3600 +_US_PER_HOUR = 3600000000 +_US_PER_DAY = 86400000000 +_US_PER_WEEK = 604800000000 + def _ord2ymd(n): "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." @@ -271,15 +284,17 @@ def _check_int_field(value): if isinstance(value, int): - return value + return int(value) if not isinstance(value, float): try: value = value.__int__() except AttributeError: pass else: - if isinstance(value, (int, long)): - return value + if isinstance(value, int): + return int(value) + elif isinstance(value, long): + return int(long(value)) raise TypeError('__int__ method should return an integer') raise TypeError('an integer is required') raise TypeError('integer argument expected, got float') @@ -344,75 +359,79 @@ raise TypeError("can't compare '%s' to '%s'" % ( type(x).__name__, type(y).__name__)) -# This is a start at a struct tm workalike. Goals: -# -# + Works the same way across platforms. -# + Handles all the fields datetime needs handled, without 1970-2038 glitches. -# -# Note: I suspect it's best if this flavor of tm does *not* try to -# second-guess timezones or DST. Instead fold whatever adjustments you want -# into the minutes argument (and the constructor will normalize). +def _normalize_pair(hi, lo, factor): + if not 0 <= lo <= factor-1: + inc, lo = divmod(lo, factor) + hi += inc + return hi, lo -class _tmxxx: +def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False): + # Normalize all the inputs, and store the normalized values. + ss, us = _normalize_pair(ss, us, 1000000) + mm, ss = _normalize_pair(mm, ss, 60) + hh, mm = _normalize_pair(hh, mm, 60) + d, hh = _normalize_pair(d, hh, 24) + y, m, d = _normalize_date(y, m, d, ignore_overflow) + return y, m, d, hh, mm, ss, us - ordinal = None +def _normalize_date(year, month, day, ignore_overflow=False): + # That was easy. Now it gets muddy: the proper range for day + # can't be determined without knowing the correct month and year, + # but if day is, e.g., plus or minus a million, the current month + # and year values make no sense (and may also be out of bounds + # themselves). + # Saying 12 months == 1 year should be non-controversial. + if not 1 <= month <= 12: + year, month = _normalize_pair(year, month-1, 12) + month += 1 + assert 1 <= month <= 12 - def __init__(self, year, month, day, hour=0, minute=0, second=0, - microsecond=0): - # Normalize all the inputs, and store the normalized values. - if not 0 <= microsecond <= 999999: - carry, microsecond = divmod(microsecond, 1000000) - second += carry - if not 0 <= second <= 59: - carry, second = divmod(second, 60) - minute += carry - if not 0 <= minute <= 59: - carry, minute = divmod(minute, 60) - hour += carry - if not 0 <= hour <= 23: - carry, hour = divmod(hour, 24) - day += carry + # Now only day can be out of bounds (year may also be out of bounds + # for a datetime object, but we don't care about that here). + # If day is out of bounds, what to do is arguable, but at least the + # method here is principled and explainable. + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + # Move day-1 days from the first of the month. First try to + # get off cheap if we're only one day out of range (adjustments + # for timezone alone can't be worse than that). + if day == 0: # move back a day + month -= 1 + if month > 0: + day = _days_in_month(year, month) + else: + year, month, day = year-1, 12, 31 + elif day == dim + 1: # move forward a day + month += 1 + day = 1 + if month > 12: + month = 1 + year += 1 + else: + ordinal = _ymd2ord(year, month, 1) + (day - 1) + year, month, day = _ord2ymd(ordinal) - # That was easy. Now it gets muddy: the proper range for day - # can't be determined without knowing the correct month and year, - # but if day is, e.g., plus or minus a million, the current month - # and year values make no sense (and may also be out of bounds - # themselves). - # Saying 12 months == 1 year should be non-controversial. - if not 1 <= month <= 12: - carry, month = divmod(month-1, 12) - year += carry - month += 1 - assert 1 <= month <= 12 + if not ignore_overflow and not MINYEAR <= year <= MAXYEAR: + raise OverflowError("date value out of range") + return year, month, day - # Now only day can be out of bounds (year may also be out of bounds - # for a datetime object, but we don't care about that here). - # If day is out of bounds, what to do is arguable, but at least the - # method here is principled and explainable. - dim = _days_in_month(year, month) - if not 1 <= day <= dim: - # Move day-1 days from the first of the month. First try to - # get off cheap if we're only one day out of range (adjustments - # for timezone alone can't be worse than that). - if day == 0: # move back a day - month -= 1 - if month > 0: - day = _days_in_month(year, month) - else: - year, month, day = year-1, 12, 31 - elif day == dim + 1: # move forward a day - month += 1 - day = 1 - if month > 12: - month = 1 - year += 1 - else: - self.ordinal = _ymd2ord(year, month, 1) + (day - 1) - year, month, day = _ord2ymd(self.ordinal) - - self.year, self.month, self.day = year, month, day - self.hour, self.minute, self.second = hour, minute, second - self.microsecond = microsecond +def _accum(tag, sofar, num, factor, leftover): + if isinstance(num, (int, long)): + prod = num * factor + rsum = sofar + prod + return rsum, leftover + if isinstance(num, float): + fracpart, intpart = _math.modf(num) + prod = int(intpart) * factor + rsum = sofar + prod + if fracpart == 0.0: + return rsum, leftover + assert isinstance(factor, (int, long)) + fracpart, intpart = _math.modf(factor * fracpart) + rsum += int(intpart) + return rsum, leftover + fracpart + raise TypeError("unsupported type for timedelta %s component: %s" % + (tag, type(num))) class timedelta(object): """Represent the difference between two datetime objects. @@ -433,100 +452,42 @@ """ __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' - def __new__(cls, days=0, seconds=0, microseconds=0, - milliseconds=0, minutes=0, hours=0, weeks=0): - # Doing this efficiently and accurately in C is going to be difficult - # and error-prone, due to ubiquitous overflow possibilities, and that - # C double doesn't have enough bits of precision to represent - # microseconds over 10K years faithfully. The code here tries to make - # explicit where go-fast assumptions can be relied on, in order to - # guide the C implementation; it's way more convoluted than speed- - # ignoring auto-overflow-to-long idiomatic Python could be. + def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL, + milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL): + x = 0 + leftover = 0.0 + if microseconds is not _SENTINEL: + x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover) + if milliseconds is not _SENTINEL: + x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover) + if seconds is not _SENTINEL: + x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover) + if minutes is not _SENTINEL: + x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover) + if hours is not _SENTINEL: + x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover) + if days is not _SENTINEL: + x, leftover = _accum("days", x, days, _US_PER_DAY, leftover) + if weeks is not _SENTINEL: + x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover) + if leftover != 0.0: + x += _round(leftover) + return cls._from_microseconds(x) - # XXX Check that all inputs are ints, longs or floats. + @classmethod + def _from_microseconds(cls, us): + s, us = divmod(us, _US_PER_SECOND) + d, s = divmod(s, _SECONDS_PER_DAY) + return cls._create(d, s, us, False) - # Final values, all integer. - # s and us fit in 32-bit signed ints; d isn't bounded. - d = s = us = 0 + @classmethod + def _create(cls, d, s, us, normalize): + if normalize: + s, us = _normalize_pair(s, us, 1000000) + d, s = _normalize_pair(d, s, 24*3600) - # Normalize everything to days, seconds, microseconds. - days += weeks*7 - seconds += minutes*60 + hours*3600 - microseconds += milliseconds*1000 - - # Get rid of all fractions, and normalize s and us. - # Take a deep breath . - if isinstance(days, float): - dayfrac, days = _math.modf(days) - daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) - assert daysecondswhole == int(daysecondswhole) # can't overflow - s = int(daysecondswhole) - assert days == int(days) - d = int(days) - else: - daysecondsfrac = 0.0 - d = days - assert isinstance(daysecondsfrac, float) - assert abs(daysecondsfrac) <= 1.0 - assert isinstance(d, (int, long)) - assert abs(s) <= 24 * 3600 - # days isn't referenced again before redefinition - - if isinstance(seconds, float): - secondsfrac, seconds = _math.modf(seconds) - assert seconds == int(seconds) - seconds = int(seconds) - secondsfrac += daysecondsfrac - assert abs(secondsfrac) <= 2.0 - else: - secondsfrac = daysecondsfrac - # daysecondsfrac isn't referenced again - assert isinstance(secondsfrac, float) - assert abs(secondsfrac) <= 2.0 - - assert isinstance(seconds, (int, long)) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) # can't overflow - assert isinstance(s, int) - assert abs(s) <= 2 * 24 * 3600 - # seconds isn't referenced again before redefinition - - usdouble = secondsfrac * 1e6 - assert abs(usdouble) < 2.1e6 # exact value not critical - # secondsfrac isn't referenced again - - if isinstance(microseconds, float): - microseconds = _round(microseconds + usdouble) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) - microseconds = int(microseconds) - else: - microseconds = int(microseconds) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) - microseconds = _round(microseconds + usdouble) - assert isinstance(s, int) - assert isinstance(microseconds, int) - assert abs(s) <= 3 * 24 * 3600 - assert abs(microseconds) < 3.1e6 - - # Just a little bit of carrying possible for microseconds and seconds. - seconds, us = divmod(microseconds, 1000000) - s += seconds - days, s = divmod(s, 24*3600) - d += days - - assert isinstance(d, (int, long)) - assert isinstance(s, int) and 0 <= s < 24*3600 - assert isinstance(us, int) and 0 <= us < 1000000 - - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) + if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS: + raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS)) self = object.__new__(cls) self._days = d @@ -535,6 +496,10 @@ self._hashcode = -1 return self + def _to_microseconds(self): + return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND + + self._microseconds) + def __repr__(self): module = "datetime." if self.__class__ is timedelta else "" if self._microseconds: @@ -562,8 +527,7 @@ def total_seconds(self): """Total seconds in the duration.""" - return ((self.days * 86400 + self.seconds) * 10**6 + - self.microseconds) / 10**6 + return self._to_microseconds() / 10**6 # Read-only field accessors @property @@ -585,36 +549,37 @@ if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(self._days + other._days, - self._seconds + other._seconds, - self._microseconds + other._microseconds) + return timedelta._create(self._days + other._days, + self._seconds + other._seconds, + self._microseconds + other._microseconds, + True) return NotImplemented - __radd__ = __add__ - def __sub__(self, other): if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(self._days - other._days, - self._seconds - other._seconds, - self._microseconds - other._microseconds) - return NotImplemented - - def __rsub__(self, other): - if isinstance(other, timedelta): - return -self + other + return timedelta._create(self._days - other._days, + self._seconds - other._seconds, + self._microseconds - other._microseconds, + True) return NotImplemented def __neg__(self): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(-self._days, - -self._seconds, - -self._microseconds) + return timedelta._create(-self._days, + -self._seconds, + -self._microseconds, + True) def __pos__(self): - return self + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta._create(self._days, + self._seconds, + self._microseconds, + False) def __abs__(self): if self._days < 0: @@ -623,25 +588,18 @@ return self def __mul__(self, other): - if isinstance(other, (int, long)): - # for CPython compatibility, we cannot use - # our __class__ here, but need a real timedelta - return timedelta(self._days * other, - self._seconds * other, - self._microseconds * other) - return NotImplemented + if not isinstance(other, (int, long)): + return NotImplemented + usec = self._to_microseconds() + return timedelta._from_microseconds(usec * other) __rmul__ = __mul__ - def _to_microseconds(self): - return ((self._days * (24*3600) + self._seconds) * 1000000 + - self._microseconds) - def __div__(self, other): if not isinstance(other, (int, long)): return NotImplemented usec = self._to_microseconds() - return timedelta(0, 0, usec // other) + return timedelta._from_microseconds(usec // other) __floordiv__ = __div__ @@ -705,9 +663,8 @@ def __reduce__(self): return (self.__class__, self._getstate()) -timedelta.min = timedelta(-999999999) -timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, - microseconds=999999) +timedelta.min = timedelta(-_MAX_DELTA_DAYS) +timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1) timedelta.resolution = timedelta(microseconds=1) class date(object): @@ -948,32 +905,29 @@ # Computations - def _checkOverflow(self, year): - if not MINYEAR <= year <= MAXYEAR: - raise OverflowError("date +/-: result year %d not in %d..%d" % - (year, MINYEAR, MAXYEAR)) + def _add_timedelta(self, other, factor): + y, m, d = _normalize_date( + self._year, + self._month, + self._day + other.days * factor) + return date(y, m, d) def __add__(self, other): "Add a date to a timedelta." if isinstance(other, timedelta): - t = _tmxxx(self._year, - self._month, - self._day + other.days) - self._checkOverflow(t.year) - result = date(t.year, t.month, t.day) - return result + return self._add_timedelta(other, 1) return NotImplemented __radd__ = __add__ def __sub__(self, other): """Subtract two dates, or a date and a timedelta.""" - if isinstance(other, timedelta): - return self + timedelta(-other.days) if isinstance(other, date): days1 = self.toordinal() days2 = other.toordinal() - return timedelta(days1 - days2) + return timedelta._create(days1 - days2, 0, 0, False) + if isinstance(other, timedelta): + return self._add_timedelta(other, -1) return NotImplemented def weekday(self): @@ -1340,7 +1294,7 @@ offset = self._tzinfo.utcoffset(None) offset = _check_utc_offset("utcoffset", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1378,7 +1332,7 @@ offset = self._tzinfo.dst(None) offset = _check_utc_offset("dst", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1505,18 +1459,24 @@ A timezone info object may be passed in as well. """ + _check_tzinfo_arg(tz) + converter = _time.localtime if tz is None else _time.gmtime + self = cls._from_timestamp(converter, timestamp, tz) + if tz is not None: + self = tz.fromutc(self) + return self - _check_tzinfo_arg(tz) + @classmethod + def utcfromtimestamp(cls, t): + "Construct a UTC datetime from a POSIX timestamp (like time.time())." + return cls._from_timestamp(_time.gmtime, t, None) - converter = _time.localtime if tz is None else _time.gmtime - - if isinstance(timestamp, int): - us = 0 - else: - t_full = timestamp - timestamp = int(_math.floor(timestamp)) - frac = t_full - timestamp - us = _round(frac * 1e6) + @classmethod + def _from_timestamp(cls, converter, timestamp, tzinfo): + t_full = timestamp + timestamp = int(_math.floor(timestamp)) + frac = t_full - timestamp + us = _round(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, @@ -1527,32 +1487,7 @@ us = 0 y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp) ss = min(ss, 59) # clamp out leap seconds if the platform has them - result = cls(y, m, d, hh, mm, ss, us, tz) - if tz is not None: - result = tz.fromutc(result) - return result - - @classmethod - def utcfromtimestamp(cls, t): - "Construct a UTC datetime from a POSIX timestamp (like time.time())." - if isinstance(t, int): - us = 0 - else: - t_full = t - t = int(_math.floor(t)) - frac = t_full - t - us = _round(frac * 1e6) - - # If timestamp is less than one microsecond smaller than a - # full second, us can be rounded up to 1000000. In this case, - # roll over to seconds, otherwise, ValueError is raised - # by the constructor. - if us == 1000000: - t += 1 - us = 0 - y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t) - ss = min(ss, 59) # clamp out leap seconds if the platform has them - return cls(y, m, d, hh, mm, ss, us) + return cls(y, m, d, hh, mm, ss, us, tzinfo) @classmethod def now(cls, tz=None): @@ -1594,9 +1529,9 @@ hh, mm, ss = self.hour, self.minute, self.second offset = self._utcoffset() if offset: # neither None nor 0 - tm = _tmxxx(y, m, d, hh, mm - offset) - y, m, d = tm.year, tm.month, tm.day - hh, mm = tm.hour, tm.minute + mm -= offset + y, m, d, hh, mm, ss, _ = _normalize_datetime( + y, m, d, hh, mm, ss, 0, ignore_overflow=True) return _build_struct_time(y, m, d, hh, mm, ss, 0) def date(self): @@ -1730,7 +1665,7 @@ offset = self._tzinfo.utcoffset(self) offset = _check_utc_offset("utcoffset", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1768,7 +1703,7 @@ offset = self._tzinfo.dst(self) offset = _check_utc_offset("dst", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1859,22 +1794,22 @@ return -1 return diff and 1 or 0 + def _add_timedelta(self, other, factor): + y, m, d, hh, mm, ss, us = _normalize_datetime( + self._year, + self._month, + self._day + other.days * factor, + self._hour, + self._minute, + self._second + other.seconds * factor, + self._microsecond + other.microseconds * factor) + return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo) + def __add__(self, other): "Add a datetime and a timedelta." if not isinstance(other, timedelta): return NotImplemented - t = _tmxxx(self._year, - self._month, - self._day + other.days, - self._hour, - self._minute, - self._second + other.seconds, - self._microsecond + other.microseconds) - self._checkOverflow(t.year) - result = datetime(t.year, t.month, t.day, - t.hour, t.minute, t.second, - t.microsecond, tzinfo=self._tzinfo) - return result + return self._add_timedelta(other, 1) __radd__ = __add__ @@ -1882,16 +1817,15 @@ "Subtract two datetimes, or a datetime and a timedelta." if not isinstance(other, datetime): if isinstance(other, timedelta): - return self + -other + return self._add_timedelta(other, -1) return NotImplemented - days1 = self.toordinal() - days2 = other.toordinal() - secs1 = self._second + self._minute * 60 + self._hour * 3600 - secs2 = other._second + other._minute * 60 + other._hour * 3600 - base = timedelta(days1 - days2, - secs1 - secs2, - self._microsecond - other._microsecond) + delta_d = self.toordinal() - other.toordinal() + delta_s = (self._hour - other._hour) * 3600 + \ + (self._minute - other._minute) * 60 + \ + (self._second - other._second) + delta_us = self._microsecond - other._microsecond + base = timedelta._create(delta_d, delta_s, delta_us, True) if self._tzinfo is other._tzinfo: return base myoff = self._utcoffset() diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -88,9 +88,19 @@ # try: unbound_method = getattr(_continulet, methodname) + _tls.leaving = current args, kwds = unbound_method(current, *baseargs, to=target) - finally: _tls.current = current + except: + _tls.current = current + if hasattr(_tls, 'trace'): + _run_trace_callback('throw') + _tls.leaving = None + raise + else: + if hasattr(_tls, 'trace'): + _run_trace_callback('switch') + _tls.leaving = None # if kwds: if args: @@ -122,6 +132,34 @@ return f.f_back.f_back.f_back # go past start(), __switch(), switch() # ____________________________________________________________ +# Recent additions + +GREENLET_USE_GC = True +GREENLET_USE_TRACING = True + +def gettrace(): + return getattr(_tls, 'trace', None) + +def settrace(callback): + try: + prev = _tls.trace + del _tls.trace + except AttributeError: + prev = None + if callback is not None: + _tls.trace = callback + return prev + +def _run_trace_callback(event): + try: + _tls.trace(event, (_tls.leaving, _tls.current)) + except: + # In case of exceptions trace function is removed + if hasattr(_tls, 'trace'): + del _tls.trace + raise + +# ____________________________________________________________ # Internal stuff try: @@ -143,22 +181,32 @@ _tls.current = gmain def _greenlet_start(greenlet, args): - args, kwds = args - _tls.current = greenlet try: - res = greenlet.run(*args, **kwds) - except GreenletExit, e: - res = e + args, kwds = args + _tls.current = greenlet + try: + if hasattr(_tls, 'trace'): + _run_trace_callback('switch') + res = greenlet.run(*args, **kwds) + except GreenletExit, e: + res = e + finally: + _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) finally: - _continuation.permute(greenlet, greenlet.parent) - return ((res,), None) + _tls.leaving = greenlet def _greenlet_throw(greenlet, exc, value, tb): - _tls.current = greenlet try: - raise exc, value, tb - except GreenletExit, e: - res = e + _tls.current = greenlet + try: + if hasattr(_tls, 'trace'): + _run_trace_callback('throw') + raise exc, value, tb + except GreenletExit, e: + res = e + finally: + _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) finally: - _continuation.permute(greenlet, greenlet.parent) - return ((res,), None) + _tls.leaving = greenlet diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -73,28 +73,36 @@ lzma (PyPy3 only) liblzma -sqlite3 - libsqlite3 - -curses - libncurses + cffi dependencies from above - pyexpat libexpat1 _ssl libssl +Make sure to have these libraries (with development headers) installed +before building PyPy, otherwise the resulting binary will not contain +these modules. Furthermore, the following libraries should be present +after building PyPy, otherwise the corresponding CFFI modules are not +built (you can run or re-run `pypy/tool/release/package.py` to retry +to build them; you don't need to re-translate the whole PyPy): + +sqlite3 + libsqlite3 + +curses + libncurses + gdbm libgdbm-dev -Make sure to have these libraries (with development headers) installed before -building PyPy, otherwise the resulting binary will not contain these modules. +tk + tk-dev On Debian, this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev + libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ + tk-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. @@ -102,6 +110,7 @@ yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel + (XXX plus the Febora version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -110,6 +119,7 @@ zypper install gcc make python-devel pkg-config \ zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ libexpat-devel libffi-devel python-curses + (XXX plus the SLES11 version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -125,11 +135,13 @@ Translate with JIT:: - pypy rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=jit Translate without JIT:: - pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=2 (You can use ``python`` instead of ``pypy`` here, which will take longer but works too.) @@ -138,8 +150,16 @@ current directory. The executable behaves mostly like a normal Python interpreter (see :doc:`cpython_differences`). +Build cffi import libraries for the stdlib +------------------------------------------ -.. _translate-pypy: +Various stdlib modules require a separate build step to create the cffi From pypy.commits at gmail.com Fri Jan 8 02:53:29 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 07 Jan 2016 23:53:29 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: a bit of random progress Message-ID: <568f6af9.cb941c0a.4fbee.ffffd855@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r81613:a59079c649da Date: 2016-01-08 09:51 +0200 http://bitbucket.org/pypy/pypy/changeset/a59079c649da/ Log: a bit of random progress diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -5,7 +5,7 @@ ResOperation, oparity, opname, rop from rpython.rlib.rarithmetic import intmask -TAGINT, TAGCONST, TAGBOX = range(3) +TAGINT, TAGCONST, TAGBOX, TAGOUTPUT = range(4) TAGMASK = 0x3 TAGSHIFT = 2 MAXINT = 65536 @@ -38,10 +38,11 @@ return RecordedOp(pos, opnum, args) class RecordedOp(AbstractValue): - def __init__(self, pos, opnum, args): + def __init__(self, pos, opnum, args, descr=None): self.opnum = opnum self.args = args self._pos = pos + self.descr = descr def get_tag(self): return tag(TAGBOX, self._pos) @@ -49,6 +50,9 @@ def getarglist(self): return self.args + def getdescr(self): + return self.descr + def numargs(self): return len(self.args) @@ -69,7 +73,6 @@ class Trace(object): - # XXX eventually merge with history.TreeLoop, maybe def __init__(self, inputargs): self._ops = [0] * (2 * len(inputargs)) # place for forwarding inputargs # plus infos @@ -92,6 +95,19 @@ self._count += 1 return pos + def _record_raw(self, opnum, tagged_args, tagged_descr=-1): + operations = self._ops + pos = len(operations) + operations.append(opnum) + operations.append(self._count) # here we keep the index into infos + if oparity[opnum] == -1: + operations.append(len(tagged_args)) + operations.extend(tagged_args) + if tagged_descr != -1: + operations.append(tagged_descr) + self._count += 1 + return pos + def record_forwarding(self, op, newtag): index = op._pos self._ops[index] = -newtag - 1 @@ -101,8 +117,11 @@ pos = self._record_op(opnum, argboxes, descr) return ResOperation(opnum, argboxes, pos, descr) - def record_op_tag(self, opnum, argboxes, descr=None): - return tag(TAGBOX, self._record_op(opnum, argboxes, descr)) + def record_op_tag(self, opnum, tagged_args, descr=None): + return tag(TAGBOX, self._record_raw(opnum, tagged_args, descr)) + + def record_op_output_tag(self, opnum, tagged_args, descr=None): + return tag(TAGOUTPUT, self._record_raw(opnum, tagged_args, descr)) def get_info(self, infos, pos): index = self._ops[pos + 1] diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -356,6 +356,7 @@ tagged = -opnum - 1 def force_box(self, op, optforce=None): + # XXX return op op = self.get_box_replacement(op) if optforce is None: @@ -416,11 +417,16 @@ else: op.set_forwarded(newop) - def replace_op_with(self, op, newopnum, args=None, descr=None): + def replace_op_with(self, op, newopnum, args=None, descr=None, output=False): # recorded_op -> tagged - newtag = self.trace.record_op_tag(newopnum, args, descr) + if not output: + newtag = self.trace.record_op_tag(newopnum, args, descr) + else: + newtag = self.output.record_op_output_tag(newopnum, args, descr) self.trace.record_forwarding(op, newtag) + # XXX info forwarding return newtag + newop = op.copy_and_change(newopnum, args, descr) if newop.type != 'v': op = self.get_box_replacement(op) @@ -529,7 +535,7 @@ def propagate_all_forward(self, trace, call_pure_results=None, rename_inputargs=True, flush=True): - #self.output = opencoder.Trace() # <- XXXX + self.output = opencoder.Trace([]) # <- XXXX, put inputargs self.infos = [None] * trace._count self.trace = trace #if rename_inputargs: @@ -577,14 +583,14 @@ def emit_operation(self, op): if rop.returns_bool_result(op.opnum): self.getintbound(op).make_bool() - self._emit_operation(op) - op = self.get_box_replacement(op) - if op.type == 'i': - opinfo = op.get_forwarded() - if opinfo is not None: - assert isinstance(opinfo, IntBound) - if opinfo.is_constant(): - op.set_forwarded(ConstInt(opinfo.getint())) + tagged_op = self._emit_operation(op) + # XXX what is this about? looks pretty optional + #if op.type == 'i': + # opinfo = op.get_forwarded() + # if opinfo is not None: + # assert isinstance(opinfo, IntBound) + # if opinfo.is_constant(): + # op.set_forwarded(ConstInt(opinfo.getint())) @specialize.argtype(0) def _emit_operation(self, op): @@ -594,13 +600,15 @@ if self.is_constant(tagged): return # can happen e.g. if we postpone the operation that becomes # constant - xxxx - op = self.replace_op_with(op, op.getopnum()) - for i in range(op.numargs()): - arg = self.force_box(op.getarg(i)) - op.setarg(i, arg) + arglist = op.getarglist() + for i in range(len(arglist)): + arglist[i] = self.force_box(arglist[i]) + opnum = op.opnum + tagged_op = self.replace_op_with(op, opnum, arglist, op.getdescr(), + output=True) self.metainterp_sd.profiler.count(jitprof.Counters.OPT_OPS) - if op.is_guard(): + if rop.is_guard(opnum): + xxx assert isinstance(op, GuardResOp) self.metainterp_sd.profiler.count(jitprof.Counters.OPT_GUARDS) pendingfields = self.pendingfields @@ -611,15 +619,16 @@ return else: op = self.emit_guard_operation(op, pendingfields) - elif op.can_raise(): + elif rop.can_raise(opnum): self.exception_might_have_happened = True - if ((op.has_no_side_effect() or op.is_guard() or op.is_jit_debug() or - op.is_ovf()) and not self.is_call_pure_pure_canraise(op)): - pass - else: - self._last_guard_op = None + #if ((op.has_no_side_effect() or op.is_guard() or op.is_jit_debug() or + # op.is_ovf()) and not self.is_call_pure_pure_canraise(op)): + # pass + #else: + # self._last_guard_op = None self._really_emitted_operation = op - self._newoperations.append(op) + #self._newoperations.append(op) + return tagged_op def emit_guard_operation(self, op, pendingfields): guard_op = self.replace_op_with(op, op.getopnum()) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -361,9 +361,6 @@ # XXX kill all those in favor of ophelpers - def is_guard(self): - return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST - def is_foldable_guard(self): return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST @@ -384,9 +381,6 @@ def has_no_side_effect(self): return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST - def can_raise(self): - return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST - def is_malloc(self): # a slightly different meaning from can_malloc return rop._MALLOC_FIRST <= self.getopnum() <= rop._MALLOC_LAST @@ -1311,8 +1305,9 @@ return rop.CALL_F return rop.CALL_N - def is_guard(self): - return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST + @staticmethod + def is_guard(opnum): + return rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST @staticmethod def is_comparison(opnum): @@ -1340,8 +1335,9 @@ def has_no_side_effect(opnum): return rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST - def can_raise(self): - return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST + @staticmethod + def can_raise(opnum): + return rop._CANRAISE_FIRST <= opnum <= rop._CANRAISE_LAST def is_malloc(self): # a slightly different meaning from can_malloc diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -5,9 +5,17 @@ from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer class SimpleOptimizer(Optimizer): + class metainterp_sd: + class profiler: + @staticmethod + def count(*args): + pass + def __init__(self, trace): self.trace = trace + self.optimizer = self # uh? self.infos = [None] * trace._count + self.output = Trace([]) class TestOpencoder(object): def unpack(self, t): @@ -26,10 +34,10 @@ assert len(l) == 2 assert l[0].opnum == rop.INT_ADD assert l[1].opnum == rop.INT_ADD - assert (untag(l[1].args[1]) == TAGINT, 1) - assert (untag(l[1].args[0]) == TAGBOX, l[0]._pos) - assert (untag(l[0].args[0]) == TAGBOX, 0) - assert (untag(l[0].args[1]) == TAGBOX, 1) + assert untag(l[1].args[1]) == (TAGINT, 1) + assert untag(l[1].args[0]) == (TAGBOX, l[0]._pos) + assert untag(l[0].args[0]) == (TAGBOX, 0) + assert untag(l[0].args[1]) == (TAGBOX, 1) def test_forwarding(self): i0, i1 = InputArgInt(), InputArgInt() @@ -51,4 +59,10 @@ assert opt.getintbound(add.get_tag()) def test_output(self): - pass \ No newline at end of file + i0 = InputArgInt() + t = Trace([i0]) + t.record_op(rop.INT_ADD, [i0, ConstInt(1)]) + opt = SimpleOptimizer(t) + add, = self.unpack(t) + opt.emit_operation(add) +# xxx \ No newline at end of file From pypy.commits at gmail.com Fri Jan 8 03:09:55 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 08 Jan 2016 00:09:55 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: malloc_cond_varsize impl Message-ID: <568f6ed3.0f811c0a.ae6fe.ffffc06e@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81616:068e8f6aa644 Date: 2016-01-08 09:05 +0100 http://bitbucket.org/pypy/pypy/changeset/068e8f6aa644/ Log: malloc_cond_varsize impl diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1217,7 +1217,7 @@ # new value of nursery_free_adr in RSZ and the adr of the new object # in RES. - self.load_gcmap(mc, r.SCRATCH, gcmap) + self.load_gcmap(mc, r.r1, gcmap) # We are jumping to malloc_slowpath without a call through a function # descriptor, because it is an internal call and "call" would trash # r2 and r11 @@ -1269,7 +1269,6 @@ def malloc_cond_varsize(self, kind, nursery_free_adr, nursery_top_adr, lengthloc, itemsize, maxlength, gcmap, arraydescr): - xxx from rpython.jit.backend.llsupport.descr import ArrayDescr assert isinstance(arraydescr, ArrayDescr) @@ -1280,46 +1279,47 @@ if maxlength > 2**16-1: maxlength = 2**16-1 # makes things easier mc = self.mc - mc.cmp_op(0, lengthloc.value, maxlength, imm=True, signed=False) + mc.cmp_op(lengthloc, maxlength, imm=True, signed=False) jmp_adr0 = mc.currpos() - mc.trap() # conditional jump, patched later + mc.reserve_cond_jump(short=True) # conditional jump, patched later # ------------------------------------------------------------ # block of code for the case: the length is <= maxlength diff = nursery_top_adr - nursery_free_adr assert _check_imm_arg(diff) - mc.load_imm(r.r2, nursery_free_adr) + mc.load_imm(r.r1, nursery_free_adr) - varsizeloc = self._multiply_by_constant(lengthloc, itemsize, - r.RSZ) # varsizeloc is either RSZ here, or equal to lengthloc if # itemsize == 1. It is the size of the variable part of the # array, in bytes. - mc.load(r.RES.value, r.r2.value, 0) # load nursery_free - mc.load(r.SCRATCH.value, r.r2.value, diff) # load nursery_top + mc.load(r.RES, l.addr(0, r.r1)) # load nursery_free + mc.load(r.SCRATCH2, l.addr(diff, r.r1)) # load nursery_top assert arraydescr.basesize >= self.gc_minimal_size_in_nursery constsize = arraydescr.basesize + self.gc_size_of_header force_realignment = (itemsize % WORD) != 0 if force_realignment: constsize += WORD - 1 - mc.addi(r.RSZ.value, varsizeloc.value, constsize) + if varsizeloc is not r.RSZ: + mc.LGR(r.RSZ, varsizeloc) + mc.AGFI(r.RSZ, l.imm(constsize)) if force_realignment: # "& ~(WORD-1)" + xxx bit_limit = 60 if WORD == 8 else 61 mc.rldicr(r.RSZ.value, r.RSZ.value, 0, bit_limit) - mc.add(r.RSZ.value, r.RES.value, r.RSZ.value) + mc.AGR(r.RSZ, r.RES) # now RSZ contains the total size in bytes, rounded up to a multiple # of WORD, plus nursery_free_adr - mc.cmp_op(0, r.RSZ.value, r.SCRATCH.value, signed=False) + mc.cmp_op(r.RSZ, r.SCRATCH, signed=False) jmp_adr1 = mc.currpos() - mc.trap() # conditional jump, patched later + mc.reserve_cond_jump(short=True) # conditional jump, patched later # ------------------------------------------------------------ # block of code for two cases: either the length is > maxlength @@ -1328,13 +1328,21 @@ # offset = mc.currpos() - jmp_adr0 pmc = OverwritingBuilder(mc, jmp_adr0, 1) - pmc.bgt(offset) # jump if GT + pmc.BRC(c.GT, l.imm(offset)) # jump if GT pmc.overwrite() # # save the gcmap - self.load_gcmap(mc, r.r2, gcmap) + self.load_gcmap(mc, r.r1, gcmap) # - # load the function to call into CTR + # load the argument(s) + if kind == rewrite.FLAG_ARRAY: + mc.LGR(r.RSZ, lengthloc) + mc.load_imm(r.RES, itemsize) + mc.load_imm(r.SCRATCH2, arraydescr.tid) + else: + mc.LGR(r.RES, lengthloc) + # + # load the function into r14 and jump if kind == rewrite.FLAG_ARRAY: addr = self.malloc_slowpath_varsize elif kind == rewrite.FLAG_STR: @@ -1343,22 +1351,12 @@ addr = self.malloc_slowpath_unicode else: raise AssertionError(kind) - mc.load_imm(r.SCRATCH, addr) - mc.mtctr(r.SCRATCH.value) - # - # load the argument(s) - if kind == rewrite.FLAG_ARRAY: - mc.mr(r.RSZ.value, lengthloc.value) - mc.load_imm(r.RES, itemsize) - mc.load_imm(r.SCRATCH, arraydescr.tid) - else: - mc.mr(r.RES.value, lengthloc.value) # # call! - mc.bctrl() + mc.branch_absolute(addr) jmp_location = mc.currpos() - mc.trap() # jump forward, patched later + mc.reserve_cond_jump(short=True) # jump forward, patched later # ------------------------------------------------------------ # block of code for the common case: the length is <= maxlength @@ -1366,21 +1364,20 @@ offset = mc.currpos() - jmp_adr1 pmc = OverwritingBuilder(mc, jmp_adr1, 1) - pmc.ble(offset) # jump if LE + pmc.BRC(c.LE, l.imm(offset)) # jump if LE pmc.overwrite() # # write down the tid, but only in this case (not in other cases # where r.RES is the result of the CALL) - mc.load_imm(r.SCRATCH, arraydescr.tid) - mc.store(r.SCRATCH.value, r.RES.value, 0) + mc.load_imm(r.SCRATCH2, arraydescr.tid) + mc.STG(r.SCRATCH2, l.addr(0, r.RES.value)) # while we're at it, this line is not needed if we've done the CALL - mc.store(r.RSZ.value, r.r2.value, 0) # store into nursery_free + mc.store(r.RSZ, l.addr(0, r.r2)) # store into nursery_free # ------------------------------------------------------------ - offset = mc.currpos() - jmp_location pmc = OverwritingBuilder(mc, jmp_location, 1) - pmc.b(offset) # jump always + pmc.BCR(c.ANY, l.imm(offset)) # jump always pmc.overwrite() def notimplemented_op(asm, op, arglocs, regalloc): From pypy.commits at gmail.com Fri Jan 8 03:09:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 08 Jan 2016 00:09:56 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged memop simplify (malloc_nursery_varsize updates) Message-ID: <568f6ed4.520e1c0a.322f7.ffffbc66@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81617:782f331d2e8b Date: 2016-01-08 09:09 +0100 http://bitbucket.org/pypy/pypy/changeset/782f331d2e8b/ Log: merged memop simplify (malloc_nursery_varsize updates) diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': diff --git a/pypy/module/test_lib_pypy/test_cPickle.py b/pypy/module/test_lib_pypy/test_cPickle.py --- a/pypy/module/test_lib_pypy/test_cPickle.py +++ b/pypy/module/test_lib_pypy/test_cPickle.py @@ -5,3 +5,7 @@ def test_stack_underflow(): py.test.raises(cPickle.UnpicklingError, cPickle.loads, "a string") + +def test_bad_key(): + e = py.test.raises(cPickle.UnpicklingError, cPickle.loads, "v") + assert str(e.value) == "invalid load key, 'v'." diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -722,7 +722,8 @@ def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): if bytes != 8: raise Exception("gc_load_indexed_f is only for 'double'!") - return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale, base_ofs) + return llop.gc_load_indexed(longlong.FLOATSTORAGE, + struct, index, scale, base_ofs) def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -203,39 +203,48 @@ def transform_to_gc_load(self, op): NOT_SIGNED = 0 CINT_ZERO = ConstInt(0) - if op.is_getarrayitem() or \ - op.getopnum() in (rop.GETARRAYITEM_RAW_I, - rop.GETARRAYITEM_RAW_F): + opnum = op.getopnum() + if opnum == rop.CALL_MALLOC_NURSERY_VARSIZE: + v_length = op.getarg(2) + scale = op.getarg(1).getint() + if scale not in self.cpu.load_supported_factors: + scale, offset, v_length = \ + self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) + op.setarg(1, ConstInt(scale)) + op.setarg(2, v_length) + elif op.is_getarrayitem() or \ + opnum in (rop.GETARRAYITEM_RAW_I, + rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) - elif op.getopnum() in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): + elif opnum in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): self.handle_setarrayitem(op) - elif op.getopnum() == rop.RAW_STORE: + elif opnum == rop.RAW_STORE: itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) value_box = op.getarg(2) self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, itemsize, 1, ofs) - elif op.getopnum() in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): + elif opnum in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - elif op.getopnum() in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, - rop.GETINTERIORFIELD_GC_F): + elif opnum in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, + rop.GETINTERIORFIELD_GC_F): ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, fieldsize, itemsize, ofs, sign) - elif op.getopnum() in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): + elif opnum in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) value_box = op.getarg(2) self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, fieldsize, itemsize, ofs) - elif op.getopnum() in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, - rop.GETFIELD_GC_PURE_I, rop.GETFIELD_GC_PURE_F, rop.GETFIELD_GC_PURE_R, - rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): + elif opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, + rop.GETFIELD_GC_PURE_I, rop.GETFIELD_GC_PURE_F, rop.GETFIELD_GC_PURE_R, + rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) if op.getopnum() in (rop.GETFIELD_GC_F, rop.GETFIELD_GC_I, rop.GETFIELD_GC_R): @@ -250,45 +259,45 @@ self.emit_op(op) return True self.emit_gc_load_or_indexed(op, ptr_box, ConstInt(0), itemsize, 1, ofs, sign) - elif op.getopnum() in (rop.SETFIELD_GC, rop.SETFIELD_RAW): + elif opnum in (rop.SETFIELD_GC, rop.SETFIELD_RAW): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) value_box = op.getarg(1) self.emit_gc_store_or_indexed(op, ptr_box, ConstInt(0), value_box, itemsize, 1, ofs) - elif op.getopnum() == rop.ARRAYLEN_GC: + elif opnum == rop.ARRAYLEN_GC: descr = op.getdescr() assert isinstance(descr, ArrayDescr) ofs = descr.lendescr.offset self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs, NOT_SIGNED) - elif op.getopnum() == rop.STRLEN: + elif opnum == rop.STRLEN: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) - elif op.getopnum() == rop.UNICODELEN: + elif opnum == rop.UNICODELEN: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) - elif op.getopnum() == rop.STRGETITEM: + elif opnum == rop.STRGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), itemsize, itemsize, basesize, NOT_SIGNED) - elif op.getopnum() == rop.UNICODEGETITEM: + elif opnum == rop.UNICODEGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), itemsize, itemsize, basesize, NOT_SIGNED) - elif op.getopnum() == rop.STRSETITEM: + elif opnum == rop.STRSETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), itemsize, itemsize, basesize) - elif op.getopnum() == rop.UNICODESETITEM: + elif opnum == rop.UNICODESETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), @@ -527,6 +536,7 @@ # replaced with another constant greater than 0.) #o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], # descr=arraydescr) + assert isinstance(arraydescr, ArrayDescr) scale = arraydescr.itemsize v_length_scaled = v_length if not isinstance(v_length, ConstInt): @@ -655,6 +665,7 @@ for op in self.last_zero_arrays: assert op.getopnum() == rop.ZERO_ARRAY descr = op.getdescr() + assert isinstance(descr, ArrayDescr) scale = descr.itemsize box = op.getarg(0) try: @@ -785,8 +796,12 @@ arraydescr.lendescr.offset != gc_descr.standard_array_length_ofs)): return False self.emitting_an_operation_that_can_collect() + scale = itemsize + if scale not in self.cpu.load_supported_factors: + scale, offset, v_length = \ + self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) op = ResOperation(rop.CALL_MALLOC_NURSERY_VARSIZE, - [ConstInt(kind), ConstInt(itemsize), v_length], + [ConstInt(kind), ConstInt(scale), v_length], descr=arraydescr) self.replace_op_with(v_result, op) self.emit_op(op) diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -315,7 +315,7 @@ 'strdescr': arraydescr}) # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.calls == [(8, 15, 10), (5, 15, 3), ('str', 3)] + assert gc_ll_descr.calls == [(8, 15, 10), (1, 15, 15), ('str', 15)] # one fit, one was too large, one was not fitting def test_malloc_slowpath(self): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1502,32 +1502,6 @@ genop_gc_load_indexed_r = _genop_gc_load_indexed genop_gc_load_indexed_f = _genop_gc_load_indexed - def _imul_const_scaled(self, mc, targetreg, sourcereg, itemsize): - """Produce one operation to do roughly - targetreg = sourcereg * itemsize - except that the targetreg may still need shifting by 0,1,2,3. - """ - if (itemsize & 7) == 0: - shift = 3 - elif (itemsize & 3) == 0: - shift = 2 - elif (itemsize & 1) == 0: - shift = 1 - else: - shift = 0 - itemsize >>= shift - # - if valid_addressing_size(itemsize - 1): - mc.LEA_ra(targetreg, (sourcereg, sourcereg, - get_scale(itemsize - 1), 0)) - elif valid_addressing_size(itemsize): - mc.LEA_ra(targetreg, (rx86.NO_BASE_REGISTER, sourcereg, - get_scale(itemsize), 0)) - else: - mc.IMUL_rri(targetreg, sourcereg, itemsize) - # - return shift - def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. This should # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a @@ -2354,12 +2328,8 @@ jmp_adr0 = self.mc.get_relative_pos() self.mc.MOV(eax, heap(nursery_free_adr)) - if valid_addressing_size(itemsize): - shift = get_scale(itemsize) - else: - shift = self._imul_const_scaled(self.mc, edi.value, - varsizeloc.value, itemsize) - varsizeloc = edi + assert valid_addressing_size(itemsize) + shift = get_scale(itemsize) # now varsizeloc is a register != eax. The size of # the variable part of the array is (varsizeloc << shift) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1009,7 +1009,7 @@ self.rm.possibly_free_var(length_box) # itemsize = op.getarg(1).getint() - maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) / itemsize + maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) self.assembler.malloc_cond_varsize( op.getarg(0).getint(), gc_ll_descr.get_nursery_free_addr(), diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -399,9 +399,7 @@ rop.GC_LOAD_I, rop.GC_LOAD_R, rop.GC_LOAD_F, - rop.GC_LOAD_INDEXED_I, rop.GC_LOAD_INDEXED_R, - rop.GC_LOAD_INDEXED_F, rop.GC_STORE, rop.GC_STORE_INDEXED, ): # list of opcodes never executed by pyjitpl From pypy.commits at gmail.com Fri Jan 8 04:03:13 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 08 Jan 2016 01:03:13 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: kill some irrelevant parts Message-ID: <568f7b51.552f1c0a.96293.ffffcf17@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81618:894173dc749c Date: 2016-01-08 11:02 +0200 http://bitbucket.org/pypy/pypy/changeset/894173dc749c/ Log: kill some irrelevant parts diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -104,7 +104,7 @@ target = target.join('trampoline_%s_%s.vmprof.c' % (name, token)) target.write(""" #include "src/precommondefs.h" -%(vmprof_stack_h)s +#include "vmprof_stack.h" %(type)s %(cont_name)s(%(llargs)s); @@ -122,106 +122,18 @@ return result; } """ % locals()) - return finish_ll_trampoline(tramp_name, tramp_name, target, token, - restok) - -def make_trampoline_function(name, func, token, restok): - from rpython.jit.backend import detect_cpu - - cont_name = 'rpyvmprof_f_%s_%s' % (name, token) - tramp_name = 'rpyvmprof_t_%s_%s' % (name, token) - orig_tramp_name = tramp_name - - func.c_name = cont_name - func._dont_inline_ = True - - if sys.platform == 'darwin': - # according to internet "At the time UNIX was written in 1974...." - # "... all C functions are prefixed with _" - cont_name = '_' + cont_name - tramp_name = '_' + tramp_name - PLT = "" - size_decl = "" - type_decl = "" - extra_align = "" - else: - PLT = "@PLT" - type_decl = "\t.type\t%s, @function" % (tramp_name,) - size_decl = "\t.size\t%s, .-%s" % ( - tramp_name, tramp_name) - extra_align = "\t.cfi_def_cfa_offset 8" - - assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( - "rvmprof only supports x86-64 CPUs for now") - - # mapping of argument count (not counting the final uid argument) to - # the register that holds this uid argument - reg = {0: '%rdi', - 1: '%rsi', - 2: '%rdx', - 3: '%rcx', - 4: '%r8', - 5: '%r9', - } - try: - reg = reg[len(token)] - except KeyError: - raise NotImplementedError( - "not supported: %r takes more than 5 arguments" % (func,)) - - target = udir.join('module_cache') - target.ensure(dir=1) - target = target.join('trampoline_%s_%s.vmprof.s' % (name, token)) - # NOTE! the tabs in this file are absolutely essential, things - # that don't start with \t are silently ignored (: WAT!?) - target.write("""\ -\t.text -\t.globl\t%(tramp_name)s -%(type_decl)s -%(tramp_name)s: -\t.cfi_startproc -\tpushq\t%(reg)s -\t.cfi_def_cfa_offset 16 -\tcall %(cont_name)s%(PLT)s -\taddq\t$8, %%rsp -%(extra_align)s -\tret -\t.cfi_endproc -%(size_decl)s -""" % locals()) - return finish_ll_trampoline(orig_tramp_name, tramp_name, target, token, - restok) - -def finish_ll_trampoline(orig_tramp_name, tramp_name, target, token, restok): - - extra_args = ['long'] header = 'RPY_EXTERN %s %s(%s);\n' % ( - token2ctype(restok), - orig_tramp_name, - ', '.join([token2ctype(tok) for tok in token] + extra_args)) - - header += """\ -static int cmp_%s(void *addr) { - if (addr == %s) return 1; -#ifdef VMPROF_ADDR_OF_TRAMPOLINE - return VMPROF_ADDR_OF_TRAMPOLINE(addr); -#undef VMPROF_ADDR_OF_TRAMPOLINE -#else - return 0; -#endif -#define VMPROF_ADDR_OF_TRAMPOLINE cmp_%s -} -""" % (tramp_name, orig_tramp_name, tramp_name) + token2ctype(restok), tramp_name, + ', '.join([token2ctype(tok) for tok in token] + ['long'])) eci = ExternalCompilationInfo( post_include_bits = [header], separate_module_files = [str(target)], ) eci = eci.merge(global_eci) - ARGS = [token2lltype(tok) for tok in token] + [lltype.Signed] return rffi.llexternal( - orig_tramp_name, ARGS, + tramp_name, ARGS, token2lltype(restok), compilation_info=eci, _nowrapper=True, sandboxsafe=True, diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -27,10 +27,7 @@ self._code_classes = set() self._gather_all_code_objs = lambda: None self._cleanup_() - if sys.maxint == 2147483647: - self._code_unique_id = 0 # XXX this is wrong, it won't work on 32bit - else: - self._code_unique_id = 0x7000000000000000 + self._code_unique_id = 0 self.cintf = cintf.setup() def _cleanup_(self): diff --git a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h +++ b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h @@ -11,68 +11,6 @@ static void *tramp_start, *tramp_end; #endif - -static ptrdiff_t vmprof_unw_get_custom_offset(void* ip, void *cp) { - -#if defined(PYPY_JIT_CODEMAP) - - intptr_t ip_l = (intptr_t)ip; - return pypy_jit_stack_depth_at_loc(ip_l); - -#elif defined(CPYTHON_GET_CUSTOM_OFFSET) - - if (ip >= tramp_start && ip <= tramp_end) { - // XXX the return value is wrong for all the places before push and - // after pop, fix - void *bp; - void *sp; - - /* This is a stage2 trampoline created by hotpatch: - - push %rbx - push %rbp - mov %rsp,%rbp - and $0xfffffffffffffff0,%rsp // make sure the stack is aligned - movabs $0x7ffff687bb10,%rbx - callq *%rbx - leaveq - pop %rbx - retq - - the stack layout is like this: - - +-----------+ high addresses - | ret addr | - +-----------+ - | saved rbx | start of the function frame - +-----------+ - | saved rbp | - +-----------+ - | ........ | <-- rbp - +-----------+ low addresses - - So, the trampoline frame starts at rbp+16, and the return address, - is at rbp+24. The vmprof API requires us to return the offset of - the frame relative to sp, hence we have this weird computation. - - XXX (antocuni): I think we could change the API to return directly - the frame address instead of the offset; however, this require a - change in the PyPy code too - */ - - unw_get_reg (cp, UNW_REG_SP, (unw_word_t*)&sp); - unw_get_reg (cp, UNW_X86_64_RBP, (unw_word_t*)&bp); - return bp+16+8-sp; - } - return -1; - -#else - - return -1; - -#endif -} - static long vmprof_write_header_for_jit_addr(void **result, long n, void *ip, int max_depth) { diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -25,17 +25,14 @@ #include #include #include +#include #include #include #include #include +#include #include #include "vmprof_getpc.h" -#ifdef __APPLE__ -#include "libunwind.h" -#else -#include "vmprof_unwind.h" -#endif #include "vmprof_mt.h" #include "vmprof_stack.h" @@ -58,14 +55,6 @@ static int opened_profile(char *interp_name); static void flush_codes(void); -#ifdef __APPLE__ -#define UNWIND_NAME "/usr/lib/system/libunwind.dylib" -#define UNW_PREFIX "unw" -#else -#define UNWIND_NAME "libunwind.so" -#define UNW_PREFIX "_ULx86_64" -#endif - RPY_EXTERN char *vmprof_init(int fd, double interval, char *interp_name) { @@ -73,22 +62,6 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); -#ifndef __APPLE__ - if (!unw_get_reg) { - void *libhandle; - - if (!(libhandle = dlopen(UNWIND_NAME, RTLD_LAZY | RTLD_LOCAL))) - goto error; - if (!(unw_get_reg = dlsym(libhandle, UNW_PREFIX "_get_reg"))) - goto error; - if (!(unw_get_proc_info = dlsym(libhandle, UNW_PREFIX "_get_proc_info"))) - goto error; - if (!(unw_init_local = dlsym(libhandle, UNW_PREFIX "_init_local"))) - goto error; - if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) - goto error; - } -#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -99,9 +72,6 @@ return strerror(errno); } return NULL; - - error: - return dlerror(); } /************************************************************/ @@ -158,55 +128,8 @@ static char atfork_hook_installed = 0; -/* ****************************************************** - * libunwind workaround for process JIT frames correctly - * ****************************************************** - */ - #include "vmprof_get_custom_offset.h" -typedef struct { - void* _unused1; - void* _unused2; - void* sp; - void* ip; - void* _unused3[sizeof(unw_cursor_t)/sizeof(void*) - 4]; -} vmprof_hacked_unw_cursor_t; - -static int vmprof_unw_step(unw_cursor_t *cp, int first_run) -{ - void* ip; - void* sp; - ptrdiff_t sp_offset; - unw_get_reg (cp, UNW_REG_IP, (unw_word_t*)&ip); - unw_get_reg (cp, UNW_REG_SP, (unw_word_t*)&sp); - if (!first_run) { - // make sure we're pointing to the CALL and not to the first - // instruction after. If the callee adjusts the stack for us - // it's not safe to be at the instruction after - ip -= 1; - } - sp_offset = vmprof_unw_get_custom_offset(ip, cp); - - if (sp_offset == -1) { - // it means that the ip is NOT in JITted code, so we can use the - // stardard unw_step - return unw_step(cp); - } - else { - // this is a horrible hack to manually walk the stack frame, by - // setting the IP and SP in the cursor - vmprof_hacked_unw_cursor_t *cp2 = (vmprof_hacked_unw_cursor_t*)cp; - void* bp = (void*)sp + sp_offset; - cp2->sp = bp; - bp -= sizeof(void*); - cp2->ip = ((void**)bp)[0]; - // the ret is on the top of the stack minus WORD - return 1; - } -} - - /* ************************************************************* * functions to dump the stack trace * ************************************************************* @@ -214,7 +137,6 @@ static int get_stack_trace(void **result, int max_depth, ucontext_t *ucontext) { - // read the first slot of shadowstack struct vmprof_stack* stack = vmprof_global_stack; int n = 0; while (n < max_depth - 1 && stack) { @@ -226,6 +148,7 @@ return n; } +#if 0 static int xxx_get_stack_trace(void** result, int max_depth, ucontext_t *ucontext) { void *ip; @@ -271,6 +194,7 @@ } return n; } +#endif static void *get_current_thread_id(void) { diff --git a/rpython/rlib/rvmprof/src/vmprof_stack.h b/rpython/rlib/rvmprof/src/vmprof_stack.h --- a/rpython/rlib/rvmprof/src/vmprof_stack.h +++ b/rpython/rlib/rvmprof/src/vmprof_stack.h @@ -22,4 +22,4 @@ RPY_EXTERN void *vmprof_address_of_global_stack(void) { return (void*)&vmprof_global_stack; -} \ No newline at end of file +} From pypy.commits at gmail.com Fri Jan 8 05:01:59 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Jan 2016 02:01:59 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: in-progress: work work work to port this to Python 3 Message-ID: <568f8917.4c0c1c0a.2c68d.395a@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2553:0680a4c2d249 Date: 2016-01-08 11:01 +0100 http://bitbucket.org/cffi/cffi/changeset/0680a4c2d249/ Log: in-progress: work work work to port this to Python 3 diff --git a/c/cffi1_module.c b/c/cffi1_module.c --- a/c/cffi1_module.c +++ b/c/cffi1_module.c @@ -212,5 +212,12 @@ (PyObject *)lib) < 0) return NULL; +#if PY_MAJOR_VERSION >= 3 + /* add manually 'module_name' in sys.modules: it seems that + Py_InitModule() is not enough to do that */ + if (PyDict_SetItemString(modules_dict, module_name, m) < 0) + return NULL; +#endif + return m; } diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -113,6 +113,19 @@ PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */ +static void _cffi_py_initialize(void) +{ + /* XXX use initsigs=0, which "skips initialization registration of + signal handlers, which might be useful when Python is + embedded" according to the Python docs. But review and think + if it should be a user-controllable setting. + + XXX we should also give a way to write errors to a buffer + instead of to stderr. + */ + Py_InitializeEx(0); +} + static int _cffi_initialize_python(void) { /* This initializes Python, imports _cffi_backend, and then the @@ -122,21 +135,18 @@ PyGILState_STATE state; PyObject *pycode=NULL, *global_dict=NULL, *x; +#if PY_MAJOR_VERSION >= 3 + /* see comments in _cffi_carefully_make_gil() about the + Python2/Python3 difference + */ +#else /* Acquire the GIL. We have no threadstate here. If Python is already initialized, it is possible that there is already one existing for this thread, but it is not made current now. */ PyEval_AcquireLock(); - /* XXX use initsigs=0, which "skips initialization registration of - signal handlers, which might be useful when Python is - embedded" according to the Python docs. But review and think - if it should be a user-controllable setting. - - XXX we should also give a way to write errors to a buffer - instead of to stderr. - */ - Py_InitializeEx(0); + _cffi_py_initialize(); /* The Py_InitializeEx() sometimes made a threadstate for us, but not always. Indeed Py_InitializeEx() could be called and do @@ -150,6 +160,7 @@ correct threadstate. */ PyEval_ReleaseLock(); +#endif state = PyGILState_Ensure(); /* Call the initxxx() function from the present module. It will @@ -243,10 +254,19 @@ static int _cffi_carefully_make_gil(void) { - /* This initializes the GIL. It can be called completely - concurrently from unrelated threads. It assumes that we don't - hold the GIL before (if it exists), and we don't hold it - afterwards. + /* This does the basic initialization of Python. It can be called + completely concurrently from unrelated threads. It assumes + that we don't hold the GIL before (if it exists), and we don't + hold it afterwards. + + What it really does is completely different in Python 2 and + Python 3. + + Python 2 + ======== + + Initialize the GIL, without initializing the rest of Python, + by calling PyEval_InitThreads(). PyEval_InitThreads() must not be called concurrently at all. So we use a global variable as a simple spin lock. This global @@ -258,7 +278,20 @@ string "ENDMARKER". We change it temporarily to point to the next character in that string. (Yes, I know it's REALLY obscure.) + + Python 3 + ======== + + In Python 3, PyEval_InitThreads() cannot be called before + Py_InitializeEx() any more. So this function calls + Py_InitializeEx() first. It uses the same obscure logic to + make sure we never call it concurrently. + + Arguably, this is less good on the spinlock, because + Py_InitializeEx() takes much longer to run than + PyEval_InitThreads(). But I didn't find a way around it. */ + #ifdef WITH_THREAD char *volatile *lock = (char *volatile *)_PyParser_TokenNames; char *old_value; @@ -278,18 +311,38 @@ this is only run at start-up anyway. */ } } +#endif +#if PY_MAJOR_VERSION >= 3 + /* Python 3: call Py_InitializeEx() */ + { + PyGILState_STATE state = PyGILState_UNLOCKED; + if (!Py_IsInitialized()) + _cffi_py_initialize(); + else + state = PyGILState_Ensure(); + + PyEval_InitThreads(); + PyGILState_Release(state); + } +#else + /* Python 2: call PyEval_InitThreads() */ +# ifdef WITH_THREAD if (!PyEval_ThreadsInitialized()) { PyEval_InitThreads(); /* makes the GIL */ PyEval_ReleaseLock(); /* then release it */ } /* else: there is already a GIL, but we still needed to do the spinlock dance to make sure that we see it as fully ready */ +# endif +#endif +#ifdef WITH_THREAD /* release the lock */ while (!cffi_compare_and_swap(lock, old_value + 1, old_value)) ; #endif + return 0; } @@ -350,20 +403,17 @@ Idea: * _cffi_carefully_make_gil(): "carefully" call - PyEval_InitThreads(). This can be called before - Py_Initialize(). + PyEval_InitThreads() (possibly with Py_InitializeEx() first). - * then we use a custom lock to make sure that a call to this - cffi-based extension will wait if another call to the same + * then we use a (local) custom lock to make sure that a call to this + cffi-based extension will wait if another call to the *same* extension is running the initialization in another thread. It is reentrant, so that a recursive call will not block, but only one from a different thread. - * then we grab the GIL and call Py_Initialize(), which will - initialize Python or do nothing if already initialized. We - know that concurrent calls to Py_Initialize() should not be - possible, even from different cffi-based extension, because - we have the GIL. + * then we grab the GIL and (Python 2) we call Py_InitializeEx(). + At this point, concurrent calls to Py_InitializeEx() are not + possible: we have the GIL. * do the rest of the specific initialization, which may temporarily release the GIL but not the custom lock. diff --git a/demo/embedding.py b/demo/embedding.py --- a/demo/embedding.py +++ b/demo/embedding.py @@ -10,11 +10,11 @@ ffi.embedding_init_code(""" from _embedding_cffi import ffi - print "preparing" # printed once + print("preparing") # printed once @ffi.def_extern() def add(x, y): - print "adding", x, "and", y + print("adding %d and %d" % (x, y)) return x + y """) diff --git a/demo/embedding_test.c b/demo/embedding_test.c --- a/demo/embedding_test.c +++ b/demo/embedding_test.c @@ -1,7 +1,7 @@ /* Link this program with libembedding_test.so. E.g. with gcc: - gcc -o embedding_test embedding_test.c _embedding_cffi.so + gcc -o embedding_test embedding_test.c _embedding_cffi*.so */ #include From pypy.commits at gmail.com Fri Jan 8 05:37:20 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Jan 2016 02:37:20 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: py3 compat Message-ID: <568f9160.08e11c0a.3c6f8.ffffe809@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2554:31afbf41f7b5 Date: 2016-01-08 11:37 +0100 http://bitbucket.org/cffi/cffi/changeset/31afbf41f7b5/ Log: py3 compat diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py --- a/testing/embedding/add1.py +++ b/testing/embedding/add1.py @@ -22,6 +22,7 @@ @ffi.def_extern() def add1(x, y): sys.stdout.write("adding %d and %d\n" % (x, y)) + sys.stdout.flush() return x + y """) diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py --- a/testing/embedding/add2.py +++ b/testing/embedding/add2.py @@ -18,6 +18,7 @@ @ffi.def_extern() def add2(x, y, z): sys.stdout.write("adding %d and %d and %d\n" % (x, y, z)) + sys.stdout.flush() return x + y + z """) diff --git a/testing/embedding/add3.py b/testing/embedding/add3.py --- a/testing/embedding/add3.py +++ b/testing/embedding/add3.py @@ -13,6 +13,7 @@ @ffi.def_extern() def add3(x, y, z, t): sys.stdout.write("adding %d, %d, %d, %d\n" % (x, y, z, t)) + sys.stdout.flush() return x + y + z + t """) diff --git a/testing/embedding/add_recursive.py b/testing/embedding/add_recursive.py --- a/testing/embedding/add_recursive.py +++ b/testing/embedding/add_recursive.py @@ -9,15 +9,18 @@ ffi.embedding_init_code(r""" from _add_recursive_cffi import ffi, lib - print "preparing REC" + import sys + print("preparing REC") + sys.stdout.flush() @ffi.def_extern() def add_rec(x, y): - print "adding %d and %d" % (x, y) + print("adding %d and %d" % (x, y)) + sys.stdout.flush() return x + y x = lib.my_callback(400) - print '<<< %d >>>' % (x,) + print('<<< %d >>>' % (x,)) """) ffi.set_source("_add_recursive_cffi", """ diff --git a/testing/embedding/perf.py b/testing/embedding/perf.py --- a/testing/embedding/perf.py +++ b/testing/embedding/perf.py @@ -18,4 +18,4 @@ """) fn = ffi.compile(verbose=True) -print 'FILENAME:', fn +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -97,7 +97,8 @@ env['LD_LIBRARY_PATH'] = libpath print('running %r in %r' % (name, path)) popen = subprocess.Popen([name], cwd=path, env=env, - stdout=subprocess.PIPE) + stdout=subprocess.PIPE, + universal_newlines=True) result = popen.stdout.read() err = popen.wait() if err: diff --git a/testing/embedding/tlocal.py b/testing/embedding/tlocal.py --- a/testing/embedding/tlocal.py +++ b/testing/embedding/tlocal.py @@ -8,16 +8,21 @@ ffi.embedding_init_code(r""" from _tlocal_cffi import ffi - import thread, itertools + import itertools + try: + import thread + g_seen = itertools.count().next + except ImportError: + import _thread as thread # py3 + g_seen = itertools.count().__next__ tloc = thread._local() - g_seen = itertools.count() @ffi.def_extern() def add1(x, y): try: num = tloc.num except AttributeError: - num = tloc.num = g_seen.next() * 1000 + num = tloc.num = g_seen() * 1000 return x + y + num """) From pypy.commits at gmail.com Fri Jan 8 06:42:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 08 Jan 2016 03:42:56 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: malloc_nursery_* fixes, need to revive the shadowstack calls (e.g. call_footer_shadow_stack) to pass the full gc integration test Message-ID: <568fa0c0.62f3c20a.95bad.ffff8c14@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81619:b67d4bcd90bc Date: 2016-01-08 12:42 +0100 http://bitbucket.org/pypy/pypy/changeset/b67d4bcd90bc/ Log: malloc_nursery_* fixes, need to revive the shadowstack calls (e.g. call_footer_shadow_stack) to pass the full gc integration test diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -315,7 +315,11 @@ 'strdescr': arraydescr}) # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.calls == [(8, 15, 10), (1, 15, 15), ('str', 15)] + scale = lambda x: x if x in self.cpu.load_supported_factors else 1 + byte = lambda f,v: v if scale(f) != 1 else v*f + assert gc_ll_descr.calls == [(scale(8), 15, byte(8,10)), + (scale(5), 15, byte(5,3)), + ('str', byte(5,3))] # one fit, one was too large, one was not fitting def test_malloc_slowpath(self): diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -446,7 +446,7 @@ mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) saved_regs = [reg for reg in r.MANAGED_REGS if reg is not r.RES and reg is not r.RSZ] - self._push_core_regs_to_jitframe(mc, saved_regs) + self._push_core_regs_to_jitframe(mc, saved_regs + [r.r14]) self._push_fp_regs_to_jitframe(mc) # if kind == 'fixed': @@ -460,26 +460,27 @@ if kind == 'fixed': # compute the size we want - mc.LGR(r.r3, r.RES) - mc.SGR(r.r3, r.RSZ) + # r5 is saved to the jit frame + # RES == r2! + mc.LGR(r.r5, r.RSZ) + mc.SGR(r.r5, r.RES) + mc.LGR(r.r2, r.r5) if hasattr(self.cpu.gc_ll_descr, 'passes_frame'): # for tests only - mc.LGR(r.r4, r.SPP) + mc.LGR(r.r3, r.SPP) elif kind == 'str' or kind == 'unicode': pass # length is already in r3 else: # arguments to the called function are [itemsize, tid, length] - # itemsize is already in r3 - mc.LGR(r.r5, r.RSZ) # length - mc.LGR(r.r4, r.SCRATCH2) # tid + # itemsize is already in r2 + mc.LGR(r.r3, r.SCRATCH2) # tid + mc.LGR(r.r4, r.RSZ) # length # Do the call addr = rffi.cast(lltype.Signed, addr) + mc.push_std_frame() mc.load_imm(mc.RAW_CALL_REG, addr) - mc.push_std_frame() - mc.store_link() mc.raw_call() - mc.restore_link mc.pop_std_frame() self._reload_frame_if_necessary(mc) @@ -489,7 +490,7 @@ # emit_call_malloc_gc()). self.propagate_memoryerror_if_r2_is_null() - self._pop_core_regs_from_jitframe(mc, saved_regs) + self._pop_core_regs_from_jitframe(mc, saved_regs + [r.r14]) self._pop_fp_regs_from_jitframe(mc) nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr() @@ -498,7 +499,7 @@ # r.SCRATCH is now the address of nursery_free # r.RES is still the result of the call done above # r.RSZ is loaded from [SCRATCH], to make the caller's store a no-op here - mc.load(r.RSZ, r.SCRATCH, 0) + mc.load(r.RSZ, r.r1, 0) # mc.BCR(c.ANY, r.r14) self.mc = None @@ -1200,15 +1201,15 @@ mc.load_imm(r.r1, nursery_free_adr) mc.load(r.RES, r.r1, 0) # load nursery_free - mc.load(r.SCRATCH2, r.r1, diff) # load nursery_top mc.LGR(r.RSZ, r.RES) if check_imm_value(size): mc.AGHI(r.RSZ, l.imm(size)) else: - mc.load_imm(r.SCRATCH, l.imm(size)) - mc.AGR(r.RSZ, r.SCRATCH) + mc.load_imm(r.SCRATCH2, l.imm(size)) + mc.AGR(r.RSZ, r.SCRATCH2) + mc.load(r.SCRATCH2, r.r1, diff) # load nursery_top mc.cmp_op(r.RSZ, r.SCRATCH2, signed=False) fast_jmp_pos = mc.currpos() @@ -1218,9 +1219,7 @@ # new value of nursery_free_adr in RSZ and the adr of the new object # in RES. self.load_gcmap(mc, r.r1, gcmap) - # We are jumping to malloc_slowpath without a call through a function - # descriptor, because it is an internal call and "call" would trash - # r2 and r11 + # no frame needed, r14 is saved on the jitframe mc.branch_absolute(self.malloc_slowpath) offset = mc.currpos() - fast_jmp_pos @@ -1242,12 +1241,13 @@ mc.LGR(r.RSZ, r.RES) sizeloc = r.RSZ - mc.load(r.RES, l.addr(0, r.r1)) # load nursery_free - mc.load(r.SCRATCH2, l.addr(diff, r.r1)) # load nursery_top + mc.load(r.RES, r.r1, 0) # load nursery_free - mc.LGR(r.SCRATCH, r.RES) - mc.AGR(r.SCRATCH, sizeloc) # sizeloc can be RSZ - mc.LGR(r.RSZ, SCRATCH) + mc.LGR(r.SCRATCH2, r.RES) + mc.AGR(r.SCRATCH2, sizeloc) # sizeloc can be RSZ + mc.LGR(r.RSZ, r.SCRATCH2) + + mc.load(r.SCRATCH2, r.r1, diff) # load nursery_top mc.cmp_op(r.RSZ, r.SCRATCH2, signed=False) @@ -1261,7 +1261,7 @@ offset = mc.currpos() - fast_jmp_pos pmc = OverwritingBuilder(mc, fast_jmp_pos, 1) - pmc.BRC(l.LE, l.imm(offset)) # jump if LE (not GT), predicted to be true + pmc.BRC(c.LE, l.imm(offset)) # jump if LE (not GT), predicted to be true pmc.overwrite() mc.STG(r.RSZ, l.addr(0, r.r1)) # store into nursery_free @@ -1279,7 +1279,7 @@ if maxlength > 2**16-1: maxlength = 2**16-1 # makes things easier mc = self.mc - mc.cmp_op(lengthloc, maxlength, imm=True, signed=False) + mc.cmp_op(lengthloc, l.imm(maxlength), imm=True, signed=False) jmp_adr0 = mc.currpos() mc.reserve_cond_jump(short=True) # conditional jump, patched later @@ -1288,35 +1288,33 @@ # block of code for the case: the length is <= maxlength diff = nursery_top_adr - nursery_free_adr - assert _check_imm_arg(diff) + assert check_imm_value(diff) mc.load_imm(r.r1, nursery_free_adr) - # varsizeloc is either RSZ here, or equal to lengthloc if - # itemsize == 1. It is the size of the variable part of the - # array, in bytes. + # no shifting needed, lengthloc is already multiplied by the + # item size - mc.load(r.RES, l.addr(0, r.r1)) # load nursery_free - mc.load(r.SCRATCH2, l.addr(diff, r.r1)) # load nursery_top + mc.load(r.RES, r.r1, 0) # load nursery_free assert arraydescr.basesize >= self.gc_minimal_size_in_nursery constsize = arraydescr.basesize + self.gc_size_of_header force_realignment = (itemsize % WORD) != 0 if force_realignment: constsize += WORD - 1 - if varsizeloc is not r.RSZ: - mc.LGR(r.RSZ, varsizeloc) + if lengthloc is not r.RSZ: + mc.LGR(r.RSZ, lengthloc) mc.AGFI(r.RSZ, l.imm(constsize)) if force_realignment: # "& ~(WORD-1)" - xxx - bit_limit = 60 if WORD == 8 else 61 - mc.rldicr(r.RSZ.value, r.RSZ.value, 0, bit_limit) + mc.LGHI(r.SCRATCH2, l.imm(~(WORD-1))) + mc.NGR(r.RSZ, r.SCRATCH2) mc.AGR(r.RSZ, r.RES) # now RSZ contains the total size in bytes, rounded up to a multiple # of WORD, plus nursery_free_adr - mc.cmp_op(r.RSZ, r.SCRATCH, signed=False) + mc.load(r.SCRATCH2, r.r1, diff) # load nursery_top + mc.cmp_op(r.RSZ, r.SCRATCH2, signed=False) jmp_adr1 = mc.currpos() mc.reserve_cond_jump(short=True) # conditional jump, patched later @@ -1353,7 +1351,9 @@ raise AssertionError(kind) # # call! + mc.push_std_frame() mc.branch_absolute(addr) + mc.pop_std_frame() jmp_location = mc.currpos() mc.reserve_cond_jump(short=True) # jump forward, patched later @@ -1370,14 +1370,14 @@ # write down the tid, but only in this case (not in other cases # where r.RES is the result of the CALL) mc.load_imm(r.SCRATCH2, arraydescr.tid) - mc.STG(r.SCRATCH2, l.addr(0, r.RES.value)) + mc.STG(r.SCRATCH2, l.addr(0, r.RES)) # while we're at it, this line is not needed if we've done the CALL - mc.store(r.RSZ, l.addr(0, r.r2)) # store into nursery_free + mc.STG(r.RSZ, l.addr(0, r.r1)) # store into nursery_free # ------------------------------------------------------------ offset = mc.currpos() - jmp_location pmc = OverwritingBuilder(mc, jmp_location, 1) - pmc.BCR(c.ANY, l.imm(offset)) # jump always + pmc.BRC(c.ANY, l.imm(offset)) # jump always pmc.overwrite() def notimplemented_op(asm, op, arglocs, regalloc): diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -133,10 +133,9 @@ self.TRAP2() def trace(self): - self.SVC(l.imm(142)) - #self.LGHI(r.r2, 17) - #self.XGR(r.r3, r.r3) - #self.SVC(l.imm(17)) + self.LGHI(r.r2, l.imm(17)) + self.XGR(r.r3, r.r3) + self.SVC(l.imm(17)) def cmp_op(self, a, b, pool=False, imm=False, signed=True, fp=False): if fp == True: @@ -200,9 +199,10 @@ def reserve_cond_jump(self, short=False): self.trap() # conditional jump, patched later - self.trap() # conditional jump, patched later + self.trap() if not short: - self.trap() # conditional jump, patched later + # 6 bytes instead of 2 + self.trap() def branch_absolute(self, addr): self.load_imm(r.r14, addr) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -756,7 +756,6 @@ return [] def prepare_call_malloc_nursery_varsize_frame(self, op): - xxx sizeloc = self.ensure_reg(op.getarg(0)) # sizeloc must be in a register, but we can free it now # (we take care explicitly of conflicts with r.RES or r.RSZ) @@ -771,7 +770,6 @@ return [sizeloc] def prepare_call_malloc_nursery_varsize(self, op): - xxx # the result will be in r.RES self.rm.force_allocate_reg(op, selected_reg=r.RES) self.rm.temp_boxes.append(op) @@ -784,7 +782,7 @@ # sure it is in a register different from r.RES and r.RSZ. (It # should not be a ConstInt at all.) length_box = op.getarg(2) - lengthloc = self.ensure_reg(length_box) + lengthloc = self.ensure_reg(length_box, force_in_reg=True) return [lengthloc] From pypy.commits at gmail.com Fri Jan 8 10:56:18 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Jan 2016 07:56:18 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: trying win fix Message-ID: <568fdc22.c74fc20a.c7529.fffff6a5@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2555:4ba87915b6b2 Date: 2016-01-08 16:56 +0100 http://bitbucket.org/cffi/cffi/changeset/4ba87915b6b2/ Log: trying win fix diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -78,7 +78,14 @@ c = distutils.ccompiler.new_compiler() print('compiling %s with %r' % (name, modules)) extra_preargs = [] - if threads and sys.platform != 'win32': + if sys.platform == 'win32': + libfiles = [] + for m in modules: + assert '/' not in m and '\\' not in m + assert m.endswith('.pyd') + libfiles.append('Release\\%s.lib' % m[:-4]) + modules = libfiles + elif threads: extra_preargs.append('-pthread') objects = c.compile([filename], macros=sorted(defines.items()), debug=True) c.link_executable(objects + modules, name, extra_preargs=extra_preargs) From pypy.commits at gmail.com Fri Jan 8 10:57:18 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Jan 2016 07:57:18 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: fix Message-ID: <568fdc5e.9a6f1c0a.7b609.5e6e@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2556:27792b7b2983 Date: 2016-01-08 16:57 +0100 http://bitbucket.org/cffi/cffi/changeset/27792b7b2983/ Log: fix diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -81,7 +81,7 @@ if sys.platform == 'win32': libfiles = [] for m in modules: - assert '/' not in m and '\\' not in m + m = os.path.basename(m) assert m.endswith('.pyd') libfiles.append('Release\\%s.lib' % m[:-4]) modules = libfiles From pypy.commits at gmail.com Fri Jan 8 11:09:22 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Jan 2016 08:09:22 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: try Message-ID: <568fdf32.0f811c0a.ae6fe.67bc@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2557:260f5be65f3f Date: 2016-01-08 17:09 +0100 http://bitbucket.org/cffi/cffi/changeset/260f5be65f3f/ Log: try diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -103,7 +103,10 @@ libpath = path env['LD_LIBRARY_PATH'] = libpath print('running %r in %r' % (name, path)) - popen = subprocess.Popen([name], cwd=path, env=env, + executable_name = name + if sys.platform == 'win32': + executable_name += '.exe' + popen = subprocess.Popen([executable_name], cwd=path, env=env, stdout=subprocess.PIPE, universal_newlines=True) result = popen.stdout.read() From pypy.commits at gmail.com Fri Jan 8 11:10:28 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 08 Jan 2016 08:10:28 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Don't create the database automagically in .generate_graphs() Message-ID: <568fdf74.11301c0a.efb2b.58f1@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81621:f13f3ff0672b Date: 2016-01-06 01:40 +0100 http://bitbucket.org/pypy/pypy/changeset/f13f3ff0672b/ Log: Don't create the database automagically in .generate_graphs() diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -40,7 +40,8 @@ t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs() + db = cbuild.build_database() + cbuild.generate_graphs(db) entrypointptr = cbuild.getentrypointptr() entrygraph = entrypointptr._obj.graph @@ -115,7 +116,8 @@ t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs() + db = cbuild.build_database() + cbuild.generate_graphs(db) def test_no_collect_detection(): from rpython.rlib import rgc @@ -140,7 +142,7 @@ cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) with py.test.raises(Exception) as f: - cbuild.generate_graphs() + cbuild.build_database() expected = "'no_collect' function can trigger collection: Author: Ronan Lamy Branch: exctrans Changeset: r81623:b729d770a6fe Date: 2016-01-06 12:29 +0100 http://bitbucket.org/pypy/pypy/changeset/b729d770a6fe/ Log: kill patch_graph() diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -389,9 +389,9 @@ for graph in node.graphs_to_patch(): graphs.append(graph) self.gctransformer.prepare_inline_helpers(graphs) - for node in funcnodes: - if getattr(node, 'funcgen', None): - node.funcgen.patch_graph() + if self.gctransformer.inline: + for graph in graphs: + self.gctransformer.inline_helpers(graph) def all_graphs(self): graphs = [] diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -92,12 +92,6 @@ def name(self, cname): #virtual return cname - def patch_graph(self): - graph = self.graph - if self.db.gctransformer and self.db.gctransformer.inline: - self.db.gctransformer.inline_helpers(graph) - return graph - def implementation_begin(self): SSI_to_SSA(self.graph) self.collect_var_and_types() From pypy.commits at gmail.com Fri Jan 8 11:10:30 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 08 Jan 2016 08:10:30 -0800 (PST) Subject: [pypy-commit] pypy exctrans: merge generate_graphs() into db.complete() Message-ID: <568fdf76.4c0c1c0a.2c68d.ffffbab7@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81622:ecb9259b9257 Date: 2016-01-06 03:07 +0100 http://bitbucket.org/pypy/pypy/changeset/ecb9259b9257/ Log: merge generate_graphs() into db.complete() diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -346,6 +346,7 @@ break # database is now complete assert not self.delayedfunctionptrs + self.inline_gc_helpers() self.completed = True if show_progress: dump() @@ -378,7 +379,9 @@ produce(node) return result - def prepare_inline_helpers(self): + def inline_gc_helpers(self): + if self.gctransformer is None: + return all_nodes = self.globalcontainers() funcnodes = [node for node in all_nodes if node.nodekind == 'func'] graphs = [] @@ -386,6 +389,9 @@ for graph in node.graphs_to_patch(): graphs.append(graph) self.gctransformer.prepare_inline_helpers(graphs) + for node in funcnodes: + if getattr(node, 'funcgen', None): + node.funcgen.patch_graph() def all_graphs(self): graphs = [] diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -195,19 +195,10 @@ DEBUG_DEFINES = {'RPY_ASSERT': 1, 'RPY_LL_ASSERT': 1} - def generate_graphs(self, db): - """"Prepare the graphs.""" - db.prepare_inline_helpers() - for node in db.containerlist: - if getattr(node, 'funcgen', None): - node.funcgen.patch_graph() - return db - def generate_source(self, db=None, defines={}, exe_name=None): assert self.c_source_filename is None if db is None: db = self.build_database() - self.generate_graphs(db) pf = self.getentrypointptr() if self.modulename is None: self.modulename = uniquemodulename('testing') diff --git a/rpython/translator/c/test/test_database.py b/rpython/translator/c/test/test_database.py --- a/rpython/translator/c/test/test_database.py +++ b/rpython/translator/c/test/test_database.py @@ -9,8 +9,6 @@ def dump_on_stdout(database): - if database.gctransformer: - database.prepare_inline_helpers() print '/*********************************/' structdeflist = database.getstructdeflist() for node in structdeflist: From pypy.commits at gmail.com Fri Jan 8 11:10:35 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 08 Jan 2016 08:10:35 -0800 (PST) Subject: [pypy-commit] pypy exctrans: merge prepare_inline_helpers() and inline_helpers(); simplify Message-ID: <568fdf7b.482e1c0a.13f84.612d@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81625:c1d9d612f60e Date: 2016-01-06 14:53 +0100 http://bitbucket.org/pypy/pypy/changeset/c1d9d612f60e/ Log: merge prepare_inline_helpers() and inline_helpers(); simplify diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -113,21 +113,14 @@ self.seen_graphs.add(graph) self.minimal_transform.add(graph) - def prepare_inline_helpers(self, graphs): + def inline_helpers(self, graphs): from rpython.translator.backendopt.inline import iter_callsites + raise_analyzer = RaiseAnalyzer(self.translator) for graph in graphs: - self.graph_dependencies[graph] = {} + to_enum = [] for called, block, i in iter_callsites(graph, None): if called in self.graphs_to_inline: - self.graph_dependencies[graph][called] = True - self.prepared = True - - def inline_helpers(self, graph): - if not self.prepared: - raise Exception("Need to call prepare_inline_helpers first") - if self.inline: - raise_analyzer = RaiseAnalyzer(self.translator) - to_enum = self.graph_dependencies.get(graph, self.graphs_to_inline) + to_enum.append(called) must_constfold = False for inline_graph in to_enum: try: diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -346,8 +346,9 @@ break # database is now complete assert not self.delayedfunctionptrs - self.inline_gc_helpers() self.completed = True + if self.gctransformer is not None and self.gctransformer.inline: + self.gctransformer.inline_helpers(self.all_graphs()) if show_progress: dump() log.database("Completed") @@ -379,20 +380,6 @@ produce(node) return result - def inline_gc_helpers(self): - if self.gctransformer is None: - return - all_nodes = self.globalcontainers() - funcnodes = [node for node in all_nodes if node.nodekind == 'func'] - graphs = [] - for node in funcnodes: - for graph in node.graphs_to_patch(): - graphs.append(graph) - self.gctransformer.prepare_inline_helpers(graphs) - if self.gctransformer.inline: - for graph in graphs: - self.gctransformer.inline_helpers(graph) - def all_graphs(self): graphs = [] for node in self.containerlist: From pypy.commits at gmail.com Fri Jan 8 11:10:33 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 08 Jan 2016 08:10:33 -0800 (PST) Subject: [pypy-commit] pypy exctrans: fix tests Message-ID: <568fdf79.8205c20a.c5c42.1f42@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81624:054945ac51d4 Date: 2016-01-06 12:56 +0100 http://bitbucket.org/pypy/pypy/changeset/054945ac51d4/ Log: fix tests diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -41,7 +41,6 @@ cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) db = cbuild.build_database() - cbuild.generate_graphs(db) entrypointptr = cbuild.getentrypointptr() entrygraph = entrypointptr._obj.graph @@ -117,7 +116,6 @@ cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) db = cbuild.build_database() - cbuild.generate_graphs(db) def test_no_collect_detection(): from rpython.rlib import rgc @@ -257,7 +255,6 @@ cbuild = CStandaloneBuilder(t, g, t.config, gcpolicy=FrameworkGcPolicy2) db = cbuild.build_database() - cbuild.generate_graphs(db) ff = graphof(t, f) #ff.show() diff --git a/rpython/memory/gctransform/test/test_transform.py b/rpython/memory/gctransform/test/test_transform.py --- a/rpython/memory/gctransform/test/test_transform.py +++ b/rpython/memory/gctransform/test/test_transform.py @@ -16,7 +16,7 @@ t = rtype(f, args_s) # XXX we shouldn't need an actual gcpolicy here. cbuild = CStandaloneBuilder(t, f, t.config, gcpolicy=self.gcpolicy) - cbuild.generate_graphs() + cbuild.build_database() graph = cbuild.getentrypointptr()._obj.graph # arguments cannot be GC objects because nobody would put a # proper header on them diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -111,7 +111,6 @@ cbuild = CStandaloneBuilder(t, entrypoint, config=t.config, gcpolicy=cls.gcpolicy) db = cbuild.build_database() - cbuild.generate_graphs(db) entrypointptr = cbuild.getentrypointptr() entrygraph = entrypointptr._obj.graph if option.view: From pypy.commits at gmail.com Fri Jan 8 11:12:07 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Jan 2016 08:12:07 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: try Message-ID: <568fdfd7.4f911c0a.323ea.7262@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2558:d13754540a7c Date: 2016-01-08 17:11 +0100 http://bitbucket.org/cffi/cffi/changeset/d13754540a7c/ Log: try diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -105,7 +105,7 @@ print('running %r in %r' % (name, path)) executable_name = name if sys.platform == 'win32': - executable_name += '.exe' + executable_name = os.path.join(path, executable_name + '.exe') popen = subprocess.Popen([executable_name], cwd=path, env=env, stdout=subprocess.PIPE, universal_newlines=True) From pypy.commits at gmail.com Fri Jan 8 11:25:01 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 08 Jan 2016 08:25:01 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Merge with default Message-ID: <568fe2dd.4c0c1c0a.2c68d.ffffc04b@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81626:a699f3807ffd Date: 2016-01-05 13:35 -0500 http://bitbucket.org/pypy/pypy/changeset/a699f3807ffd/ Log: Merge with default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -559,6 +559,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +593,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,8 @@ .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 +Fixed ``_PyLong_FromByteArray()``, which was buggy. + .. branch: numpy-1.10 Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy @@ -101,3 +103,10 @@ Fix the cryptic exception message when attempting to use extended slicing in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -89,6 +89,7 @@ 'set_code_callback' : 'interp_magic.set_code_callback', 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', + 'decode_long' : 'interp_magic.decode_long', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pycode import CodeHookCache from pypy.interpreter.pyframe import PyFrame @@ -158,4 +158,13 @@ if space.is_none(w_callable): cache._code_hook = None else: - cache._code_hook = w_callable \ No newline at end of file + cache._code_hook = w_callable + + at unwrap_spec(string=str, byteorder=str, signed=int) +def decode_long(space, string, byteorder='little', signed=1): + from rpython.rlib.rbigint import rbigint, InvalidEndiannessError + try: + result = rbigint.frombytes(string, byteorder, bool(signed)) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, "invalid byteorder argument") + return space.newlong_from_rbigint(result) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -30,4 +30,20 @@ """ in d finally: __pypy__.set_code_callback(None) - assert d['f'].__code__ in l \ No newline at end of file + assert d['f'].__code__ in l + + def test_decode_long(self): + from __pypy__ import decode_long + assert decode_long('') == 0 + assert decode_long('\xff\x00') == 255 + assert decode_long('\xff\x7f') == 32767 + assert decode_long('\x00\xff') == -256 + assert decode_long('\x00\x80') == -32768 + assert decode_long('\x80') == -128 + assert decode_long('\x7f') == 127 + assert decode_long('\x55' * 97) == (1 << (97 * 8)) // 3 + assert decode_long('\x00\x80', 'big') == 128 + assert decode_long('\xff\x7f', 'little', False) == 32767 + assert decode_long('\x00\x80', 'little', False) == 32768 + assert decode_long('\x00\x80', 'little', True) == -32768 + raises(ValueError, decode_long, '', 'foo') diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -228,26 +228,11 @@ def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - - result = rbigint() - negative = False - - for i in range(0, n): - if little_endian: - c = intmask(bytes[i]) - else: - c = intmask(bytes[n - i - 1]) - if i == 0 and signed and c & 0x80: - negative = True - if negative: - c = c ^ 0xFF - digit = rbigint.fromint(c) - - result = result.lshift(8) - result = result.add(digit) - - if negative: - result = result.neg() - + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, bytes), + rffi.cast(lltype.Signed, n)) + if little_endian: + byteorder = 'little' + else: + byteorder = 'big' + result = rbigint.frombytes(s, byteorder, signed != 0) return space.newlong_from_rbigint(result) - diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -175,10 +175,26 @@ little_endian, is_signed); """), ]) - assert module.from_bytearray(True, False) == 0x9ABC - assert module.from_bytearray(True, True) == -0x6543 - assert module.from_bytearray(False, False) == 0xBC9A - assert module.from_bytearray(False, True) == -0x4365 + assert module.from_bytearray(True, False) == 0xBC9A + assert module.from_bytearray(True, True) == -0x4366 + assert module.from_bytearray(False, False) == 0x9ABC + assert module.from_bytearray(False, True) == -0x6544 + + def test_frombytearray_2(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x41BC9A + assert module.from_bytearray(True, True) == 0x41BC9A + assert module.from_bytearray(False, False) == 0x9ABC41 + assert module.from_bytearray(False, True) == -0x6543BF def test_fromunicode(self): module = self.import_extension('foo', [ diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -299,7 +299,7 @@ return build_stat_result(space, st) def lstat(space, w_path): - "Like stat(path), but do no follow symbolic links." + "Like stat(path), but do not follow symbolic links." try: st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -27,7 +27,7 @@ from pypy.module.thread import gil MixedModule.__init__(self, space, *args) prev_ec = space.threadlocals.get_ec() - space.threadlocals = gil.GILThreadLocals() + space.threadlocals = gil.GILThreadLocals(space) space.threadlocals.initialize(space) if prev_ec is not None: space.threadlocals._set_ec(prev_ec) diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -65,7 +65,7 @@ except Exception, e: assert 0 thread.gc_thread_die() - my_gil_threadlocals = gil.GILThreadLocals() + my_gil_threadlocals = gil.GILThreadLocals(space) def f(): state.data = [] state.datalen1 = 0 diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,5 +1,7 @@ -from rpython.rlib import rthread +import weakref +from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,15 +15,51 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - def __init__(self): + def __init__(self, space): "NOT_RPYTHON" - self._valuedict = {} # {thread_ident: ExecutionContext()} + # + # This object tracks code that enters and leaves threads. + # There are two APIs. For Python-level threads, we know when + # the thread starts and ends, and we call enter_thread() and + # leave_thread(). In a few other cases, like callbacks, we + # might be running in some never-seen-before thread: in this + # case, the callback logic needs to call try_enter_thread() at + # the start, and if this returns True it needs to call + # leave_thread() at the end. + # + # We implement an optimization for the second case (which only + # works if we translate with a framework GC and with + # rweakref). If try_enter_thread() is called in a + # never-seen-before thread, it still returns False and + # remembers the ExecutionContext with 'self._weaklist'. The + # next time we call try_enter_thread() again in the same + # thread, the ExecutionContext is reused. The optimization is + # not completely invisible to the user: 'thread._local()' + # values will remain. We can argue that it is the correct + # behavior to do that, and the behavior we get if the + # optimization is disabled is buggy (but hard to do better + # then). + # + # 'self._valuedict' is a dict mapping the thread idents to + # ExecutionContexts; it does not list the ExecutionContexts + # which are in 'self._weaklist'. (The latter is more precisely + # a list of AutoFreeECWrapper objects, defined below, which + # each references the ExecutionContext.) + # + self.space = space + self._valuedict = {} self._cleanup_() self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, loop_invariant=True) + def can_optimize_with_weaklist(self): + config = self.space.config + return (config.translation.rweakref and + rthread.ThreadLocalReference.automatic_keepalive(config)) + def _cleanup_(self): self._valuedict.clear() + self._weaklist = None self._mainthreadident = 0 def enter_thread(self, space): @@ -29,19 +67,35 @@ self._set_ec(space.createexecutioncontext()) def try_enter_thread(self, space): - if rthread.get_ident() in self._valuedict: + # common case: the thread-local has already got a value + if self.raw_thread_local.get() is not None: return False - self.enter_thread(space) - return True - def _set_ec(self, ec): + # Else, make and attach a new ExecutionContext + ec = space.createexecutioncontext() + if not self.can_optimize_with_weaklist(): + self._set_ec(ec) + return True + + # If can_optimize_with_weaklist(), then 'rthread' keeps the + # thread-local values alive until the end of the thread. Use + # AutoFreeECWrapper as an object with a __del__; when this + # __del__ is called, it means the thread was really finished. + # In this case we don't want leave_thread() to be called + # explicitly, so we return False. + if self._weaklist is None: + self._weaklist = ListECWrappers() + self._weaklist.append(weakref.ref(AutoFreeECWrapper(ec))) + self._set_ec(ec, register_in_valuedict=False) + return False + + def _set_ec(self, ec, register_in_valuedict=True): ident = rthread.get_ident() if self._mainthreadident == 0 or self._mainthreadident == ident: ec._signals_enabled = 1 # the main thread is enabled self._mainthreadident = ident - self._valuedict[ident] = ec - # This logic relies on hacks and _make_sure_does_not_move(). - # It only works because we keep the 'ec' alive in '_valuedict' too. + if register_in_valuedict: + self._valuedict[ident] = ec self.raw_thread_local.set(ec) def leave_thread(self, space): @@ -84,7 +138,23 @@ ec._signals_enabled = new def getallvalues(self): - return self._valuedict + if self._weaklist is None: + return self._valuedict + # This logic walks the 'self._weaklist' list and adds the + # ExecutionContexts to 'result'. We are careful in case there + # are two AutoFreeECWrappers in the list which have the same + # 'ident'; in this case we must keep the most recent one (the + # older one should be deleted soon). Moreover, entries in + # self._valuedict have priority because they are never + # outdated. + result = {} + for h in self._weaklist.items(): + wrapper = h() + if wrapper is not None and not wrapper.deleted: + result[wrapper.ident] = wrapper.ec + # ^^ this possibly overwrites an older ec + result.update(self._valuedict) + return result def reinit_threads(self, space): "Called in the child process after a fork()" @@ -94,7 +164,31 @@ old_sig = ec._signals_enabled if ident != self._mainthreadident: old_sig += 1 - self._cleanup_() + self._cleanup_() # clears self._valuedict self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig + + +class AutoFreeECWrapper(object): + deleted = False + + def __init__(self, ec): + # this makes a loop between 'self' and 'ec'. It should not prevent + # the __del__ method here from being called. + self.ec = ec + ec._threadlocals_auto_free = self + self.ident = rthread.get_ident() + + def __del__(self): + from pypy.module.thread.os_local import thread_is_stopping + # this is always called in another thread: the thread + # referenced by 'self.ec' has finished at that point, and + # we're just after the GC which finds no more references to + # 'ec' (and thus to 'self'). + self.deleted = True + thread_is_stopping(self.ec) + +class ListECWrappers(rshrinklist.AbstractShrinkList): + def must_keep(self, wref): + return wref() is not None diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -358,3 +358,10 @@ assert 3L.__coerce__(4L) == (3L, 4L) assert 3L.__coerce__(4) == (3, 4) assert 3L.__coerce__(object()) == NotImplemented + + def test_linear_long_base_16(self): + # never finishes if long(_, 16) is not linear-time + size = 100000 + n = "a" * size + expected = (2 << (size * 4)) // 3 + assert long(n, 16) == expected diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -97,6 +97,18 @@ def __init__(self, buffer, offset, size): self.readonly = buffer.readonly + if isinstance(buffer, SubBuffer): # don't nest them + # we want a view (offset, size) over a view + # (buffer.offset, buffer.size) over buffer.buffer. + # Note that either '.size' can be -1 to mean 'up to the end'. + at_most = buffer.getlength() - offset + if size > at_most or size < 0: + if at_most < 0: + at_most = 0 + size = at_most + offset += buffer.offset + buffer = buffer.buffer + # self.buffer = buffer self.offset = offset self.size = size diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -1,4 +1,4 @@ -secondary_entrypoints = {} +secondary_entrypoints = {"main": []} import py from rpython.rtyper.lltypesystem import lltype, rffi @@ -109,20 +109,3 @@ "you. Another difference is that entrypoint_highlevel() " "returns the normal Python function, which can be safely " "called from more Python code.") - - -# the point of dance below is so the call to rpython_startup_code actually -# does call asm_stack_bottom. It's here because there is no other good place. -# This thing is imported by any target which has any API, so it'll get -# registered - -RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True, - random_effects_on_gcobjs=True) - - at entrypoint_lowlevel('main', [], c_name='rpython_startup_code') -def rpython_startup_code(): - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py - RPython_StartupCode() - rffi.stackcounter.stacks_counter -= 1 diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -335,6 +335,25 @@ # XXX this can be made more efficient in the future return bytearray(str(i)) +def fetch_translated_config(): + """Returns the config that is current when translating. + Returns None if not translated. + """ + return None + +class Entry(ExtRegistryEntry): + _about_ = fetch_translated_config + + def compute_result_annotation(self): + config = self.bookkeeper.annotator.translator.config + return self.bookkeeper.immutablevalue(config) + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + translator = hop.rtyper.annotator.translator + hop.exception_cannot_occur() + return hop.inputconst(lltype.Void, translator.config) + # ____________________________________________________________ class FREED_OBJECT(object): diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2794,8 +2794,10 @@ def parse_digit_string(parser): # helper for fromstr + base = parser.base + if (base & (base - 1)) == 0: + return parse_string_from_binary_base(parser) a = rbigint() - base = parser.base digitmax = BASE_MAX[base] tens, dig = 1, 0 while True: @@ -2811,3 +2813,52 @@ tens *= base a.sign *= parser.sign return a + +def parse_string_from_binary_base(parser): + # The point to this routine is that it takes time linear in the number of + # string characters. + from rpython.rlib.rstring import ParseStringError + + base = parser.base + if base == 2: bits_per_char = 1 + elif base == 4: bits_per_char = 2 + elif base == 8: bits_per_char = 3 + elif base == 16: bits_per_char = 4 + elif base == 32: bits_per_char = 5 + else: + raise AssertionError + + # n <- total number of bits needed, while moving 'parser' to the end + n = 0 + while parser.next_digit() >= 0: + n += 1 + + # b <- number of Python digits needed, = ceiling(n/SHIFT). */ + try: + b = ovfcheck(n * bits_per_char) + b = ovfcheck(b + (SHIFT - 1)) + except OverflowError: + raise ParseStringError("long string too large to convert") + b = (b // SHIFT) or 1 + z = rbigint([NULLDIGIT] * b, sign=parser.sign) + + # Read string from right, and fill in long from left; i.e., + # from least to most significant in both. + accum = _widen_digit(0) + bits_in_accum = 0 + pdigit = 0 + for _ in range(n): + k = parser.prev_digit() + accum |= _widen_digit(k) << bits_in_accum + bits_in_accum += bits_per_char + if bits_in_accum >= SHIFT: + z.setdigit(pdigit, accum) + pdigit += 1 + assert pdigit <= b + accum >>= SHIFT + bits_in_accum -= SHIFT + + if bits_in_accum: + z.setdigit(pdigit, accum) + z._normalize() + return z diff --git a/rpython/rlib/rshrinklist.py b/rpython/rlib/rshrinklist.py --- a/rpython/rlib/rshrinklist.py +++ b/rpython/rlib/rshrinklist.py @@ -6,6 +6,8 @@ The twist is that occasionally append() will throw away the items for which must_keep() returns False. (It does so without changing the order.) + + See also rpython.rlib.rweaklist. """ _mixin_ = True diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -485,6 +485,24 @@ else: return -1 + def prev_digit(self): + # After exhausting all n digits in next_digit(), you can walk them + # again in reverse order by calling prev_digit() exactly n times + i = self.i - 1 + assert i >= 0 + self.i = i + c = self.s[i] + digit = ord(c) + if '0' <= c <= '9': + digit -= ord('0') + elif 'A' <= c <= 'Z': + digit = (digit - ord('A')) + 10 + elif 'a' <= c <= 'z': + digit = (digit - ord('a')) + 10 + else: + raise AssertionError + return digit + # -------------- public API --------------------------------- INIT_SIZE = 100 # XXX tweak diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -291,8 +291,6 @@ # ____________________________________________________________ # # Thread-locals. -# KEEP THE REFERENCE ALIVE, THE GC DOES NOT FOLLOW THEM SO FAR! -# We use _make_sure_does_not_move() to make sure the pointer will not move. class ThreadLocalField(object): @@ -351,6 +349,11 @@ class ThreadLocalReference(ThreadLocalField): + # A thread-local that points to an object. The object stored in such + # a thread-local is kept alive as long as the thread is not finished + # (but only with our own GCs! it seems not to work with Boehm...) + # (also, on Windows, if you're not making a DLL but an EXE, it will + # leak the objects when a thread finishes; see threadlocal.c.) _COUNT = 1 def __init__(self, Cls, loop_invariant=False): @@ -378,20 +381,41 @@ assert isinstance(value, Cls) or value is None if we_are_translated(): from rpython.rtyper.annlowlevel import cast_instance_to_gcref - from rpython.rlib.rgc import _make_sure_does_not_move - from rpython.rlib.objectmodel import running_on_llinterp gcref = cast_instance_to_gcref(value) - if not running_on_llinterp: - if gcref: - _make_sure_does_not_move(gcref) value = lltype.cast_ptr_to_int(gcref) setraw(value) + rgc.register_custom_trace_hook(TRACETLREF, _lambda_trace_tlref) + rgc.ll_writebarrier(_tracetlref_obj) else: self.local.value = value self.get = get self.set = set + def _trace_tlref(gc, obj, callback, arg): + p = llmemory.NULL + llop.threadlocalref_acquire(lltype.Void) + while True: + p = llop.threadlocalref_enum(llmemory.Address, p) + if not p: + break + gc._trace_callback(callback, arg, p + offset) + llop.threadlocalref_release(lltype.Void) + _lambda_trace_tlref = lambda: _trace_tlref + TRACETLREF = lltype.GcStruct('TRACETLREF') + _tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) + + @staticmethod + def automatic_keepalive(config): + """Returns True if translated with a GC that keeps alive + the set() value until the end of the thread. Returns False + if you need to keep it alive yourself (but in that case, you + should also reset it to None before the thread finishes). + """ + return (config.translation.gctransformer == "framework" and + # see translator/c/src/threadlocal.c for the following line + (not _win32 or config.translation.shared)) + tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", loop_invariant=True) @@ -399,7 +423,8 @@ loop_invariant=True) tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") tlfield_alt_errno = ThreadLocalField(rffi.INT, "alt_errno") -if sys.platform == "win32": +_win32 = (sys.platform == "win32") +if _win32: from rpython.rlib import rwin32 tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") tlfield_alt_lasterror = ThreadLocalField(rwin32.DWORD, "alt_lasterror") diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py --- a/rpython/rlib/rweaklist.py +++ b/rpython/rlib/rweaklist.py @@ -5,6 +5,13 @@ class RWeakListMixin(object): + """A mixin base class. A collection that weakly maps indexes to objects. + After an object goes away, its index is marked free and will be reused + by some following add_handle() call. So add_handle() might not append + the object at the end of the list, but can put it anywhere. + + See also rpython.rlib.rshrinklist. + """ _mixin_ = True def initialize(self): diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -45,3 +45,22 @@ ssbuf = SubBuffer(sbuf, 3, 2) assert ssbuf.getslice(0, 2, 1, 2) == 'ld' assert ssbuf.as_str_and_offset_maybe() == ('hello world', 9) + # + ss2buf = SubBuffer(sbuf, 1, -1) + assert ss2buf.as_str() == 'orld' + assert ss2buf.getlength() == 4 + ss3buf = SubBuffer(ss2buf, 1, -1) + assert ss3buf.as_str() == 'rld' + assert ss3buf.getlength() == 3 + # + ss4buf = SubBuffer(buf, 3, 4) + assert ss4buf.as_str() == 'lo w' + ss5buf = SubBuffer(ss4buf, 1, -1) + assert ss5buf.as_str() == 'o w' + assert ss5buf.getlength() == 3 + +def test_repeated_subbuffer(): + buf = StringBuffer('x' * 10000) + for i in range(9999, 9, -1): + buf = SubBuffer(buf, 1, i) + assert buf.getlength() == 10 diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -6,7 +6,8 @@ prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, - setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin) + setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin, + fetch_translated_config) from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.test.test_llinterp import interpret @@ -439,6 +440,13 @@ res = self.interpret(f, [42]) assert res == 84 + def test_fetch_translated_config(self): + assert fetch_translated_config() is None + def f(): + return fetch_translated_config().translation.continuation + res = self.interpret(f, []) + assert res is False + def test_specialize_decorator(): def f(): diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -825,7 +825,19 @@ def __init__(self, base, sign, digits): self.base = base self.sign = sign - self.next_digit = iter(digits + [-1]).next + self.i = 0 + self._digits = digits + def next_digit(self): + i = self.i + if i == len(self._digits): + return -1 + self.i = i + 1 + return self._digits[i] + def prev_digit(self): + i = self.i - 1 + assert i >= 0 + self.i = i + return self._digits[i] x = parse_digit_string(Parser(10, 1, [6])) assert x.eq(rbigint.fromint(6)) x = parse_digit_string(Parser(10, 1, [6, 2, 3])) @@ -847,6 +859,16 @@ x = parse_digit_string(Parser(7, -1, [0, 0, 0])) assert x.tobool() is False + for base in [2, 4, 8, 16, 32]: + for inp in [[0], [1], [1, 0], [0, 1], [1, 0, 1], [1, 0, 0, 1], + [1, 0, 0, base-1, 0, 1], [base-1, 1, 0, 0, 0, 1, 0], + [base-1]]: + inp = inp * 97 + x = parse_digit_string(Parser(base, -1, inp)) + num = sum(inp[i] * (base ** (len(inp)-1-i)) + for i in range(len(inp))) + assert x.eq(rbigint.fromlong(-num)) + BASE = 2 ** SHIFT diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -1,6 +1,7 @@ import gc, time from rpython.rlib.rthread import * from rpython.rlib.rarithmetic import r_longlong +from rpython.rlib import objectmodel from rpython.translator.c.test.test_boehm import AbstractGCTestClass from rpython.rtyper.lltypesystem import lltype, rffi import py @@ -240,3 +241,60 @@ class TestUsingFramework(AbstractThreadTests): gcpolicy = 'minimark' + + def test_tlref_keepalive(self, no__thread=True): + import weakref + from rpython.config.translationoption import SUPPORT__THREAD + + if not (SUPPORT__THREAD or no__thread): + py.test.skip("no __thread support here") + + class FooBar(object): + pass + t = ThreadLocalReference(FooBar) + + def tset(): + x1 = FooBar() + t.set(x1) + return weakref.ref(x1) + tset._dont_inline_ = True + + class WrFromThread: + pass + wr_from_thread = WrFromThread() + + def f(): + config = objectmodel.fetch_translated_config() + assert t.automatic_keepalive(config) is True + wr = tset() + import gc; gc.collect() # 'x1' should not be collected + x2 = t.get() + assert x2 is not None + assert wr() is not None + assert wr() is x2 + return wr + + def thread_entry_point(): + wr = f() + wr_from_thread.wr = wr + wr_from_thread.seen = True + + def main(): + wr_from_thread.seen = False + start_new_thread(thread_entry_point, ()) + wr1 = f() + time.sleep(0.5) + assert wr_from_thread.seen is True + wr2 = wr_from_thread.wr + import gc; gc.collect() # wr2() should be collected here + assert wr1() is not None # this thread, still running + assert wr2() is None # other thread, not running any more + return 42 + + extra_options = {'no__thread': no__thread, 'shared': True} + fn = self.getcompiled(main, [], extra_options=extra_options) + res = fn() + assert res == 42 + + def test_tlref_keepalive__thread(self): + self.test_tlref_keepalive(no__thread=False) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -950,6 +950,13 @@ return self.op_raw_load(RESTYPE, _address_of_thread_local(), offset) op_threadlocalref_get.need_result_type = True + def op_threadlocalref_acquire(self, prev): + raise NotImplementedError + def op_threadlocalref_release(self, prev): + raise NotImplementedError + def op_threadlocalref_enum(self, prev): + raise NotImplementedError + # __________________________________________________________ # operations on addresses diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -545,8 +545,11 @@ 'getslice': LLOp(canraise=(Exception,)), 'check_and_clear_exc': LLOp(), - 'threadlocalref_addr': LLOp(sideeffects=False), # get (or make) addr of tl + 'threadlocalref_addr': LLOp(), # get (or make) addr of tl 'threadlocalref_get': LLOp(sideeffects=False), # read field (no check) + 'threadlocalref_acquire': LLOp(), # lock for enum + 'threadlocalref_release': LLOp(), # lock for enum + 'threadlocalref_enum': LLOp(sideeffects=False), # enum all threadlocalrefs # __________ debugging __________ 'debug_view': LLOp(), diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -733,6 +733,9 @@ print >> f, 'struct pypy_threadlocal_s {' print >> f, '\tint ready;' print >> f, '\tchar *stack_end;' + print >> f, '\tstruct pypy_threadlocal_s *prev, *next;' + # note: if the four fixed fields above are changed, you need + # to adapt threadlocal.c's linkedlist_head declaration too for field in fields: typename = database.gettype(field.FIELDTYPE) print >> f, '\t%s;' % cdecl(typename, field.fieldname) diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,24 @@ # include #endif +void rpython_startup_code(void) +{ +#ifdef RPY_WITH_GIL + RPyGilAcquire(); +#endif +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; +#endif + pypy_asm_stack_bottom(); + RPython_StartupCode(); +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter--; +#endif +#ifdef RPY_WITH_GIL + RPyGilRelease(); +#endif +} + RPY_EXTERN int pypy_main_function(int argc, char *argv[]) diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h --- a/rpython/translator/c/src/thread.h +++ b/rpython/translator/c/src/thread.h @@ -48,7 +48,7 @@ } static inline void _RPyGilRelease(void) { assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); - rpy_fastgil = 0; + lock_release(&rpy_fastgil); } static inline long *_RPyFetchFastGil(void) { return &rpy_fastgil; diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -231,10 +231,19 @@ return (result != WAIT_TIMEOUT); } -#define mutex1_t mutex2_t -#define mutex1_init mutex2_init -#define mutex1_lock mutex2_lock -#define mutex1_unlock mutex2_unlock +typedef CRITICAL_SECTION mutex1_t; + +static inline void mutex1_init(mutex1_t *mutex) { + InitializeCriticalSection(mutex); +} + +static inline void mutex1_lock(mutex1_t *mutex) { + EnterCriticalSection(mutex); +} + +static inline void mutex1_unlock(mutex1_t *mutex) { + LeaveCriticalSection(mutex); +} //#define lock_test_and_set(ptr, value) see thread_nt.h #define atomic_increment(ptr) InterlockedIncrement(ptr) diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -38,3 +38,4 @@ #else #define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) #endif +#define lock_release(ptr) (*((volatile long *)ptr) = 0) diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h --- a/rpython/translator/c/src/thread_pthread.h +++ b/rpython/translator/c/src/thread_pthread.h @@ -81,3 +81,4 @@ #define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) +#define lock_release(ptr) __sync_lock_release(ptr) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -3,20 +3,99 @@ #include #include #include -#ifndef _WIN32 -# include +#include +#include "src/threadlocal.h" +#include "src/thread.h" + + +/* this is a spin-lock that must be acquired around each doubly-linked-list + manipulation (because such manipulations can occur without the GIL) */ +static long pypy_threadlocal_lock = 0; + +static int check_valid(void); + +void _RPython_ThreadLocals_Acquire(void) { + while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { + /* busy loop */ + } + assert(check_valid()); +} +void _RPython_ThreadLocals_Release(void) { + assert(check_valid()); + lock_release(&pypy_threadlocal_lock); +} + + +pthread_key_t pypy_threadlocal_key +#ifdef _WIN32 += TLS_OUT_OF_INDEXES #endif -#include "src/threadlocal.h" +; +static struct pypy_threadlocal_s linkedlist_head = { + -1, /* ready */ + NULL, /* stack_end */ + &linkedlist_head, /* prev */ + &linkedlist_head }; /* next */ + +static int check_valid(void) +{ + struct pypy_threadlocal_s *prev, *cur; + prev = &linkedlist_head; + while (1) { + cur = prev->next; + assert(cur->prev == prev); + if (cur == &linkedlist_head) + break; + assert(cur->ready == 42); + assert(cur->next != cur); + prev = cur; + } + assert(cur->ready == -1); + return 1; +} + +static void cleanup_after_fork(void) +{ + /* assume that at most one pypy_threadlocal_s survived, the current one */ + struct pypy_threadlocal_s *cur; +#ifdef USE___THREAD + cur = &pypy_threadlocal; +#else + cur = (struct pypy_threadlocal_s *)_RPy_ThreadLocals_Get(); +#endif + if (cur && cur->ready == 42) { + cur->next = cur->prev = &linkedlist_head; + linkedlist_head.next = linkedlist_head.prev = cur; + } + else { + linkedlist_head.next = linkedlist_head.prev = &linkedlist_head; + } + _RPython_ThreadLocals_Release(); +} + + +struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev) +{ + if (prev == NULL) + prev = &linkedlist_head; + if (prev->next == &linkedlist_head) + return NULL; + return prev->next; +} static void _RPy_ThreadLocals_Init(void *p) { + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + struct pypy_threadlocal_s *oldnext; memset(p, 0, sizeof(struct pypy_threadlocal_s)); + #ifdef RPY_TLOFS_p_errno - ((struct pypy_threadlocal_s *)p)->p_errno = &errno; + tls->p_errno = &errno; #endif #ifdef RPY_TLOFS_thread_ident - ((struct pypy_threadlocal_s *)p)->thread_ident = + tls->thread_ident = # ifdef _WIN32 GetCurrentThreadId(); # else @@ -26,58 +105,80 @@ where it is not the case are rather old nowadays. */ # endif #endif - ((struct pypy_threadlocal_s *)p)->ready = 42; + _RPython_ThreadLocals_Acquire(); + oldnext = linkedlist_head.next; + tls->prev = &linkedlist_head; + tls->next = oldnext; + linkedlist_head.next = tls; + oldnext->prev = tls; + tls->ready = 42; + _RPython_ThreadLocals_Release(); } +static void threadloc_unlink(void *p) +{ + /* warning: this can be called at completely random times without + the GIL. */ + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + _RPython_ThreadLocals_Acquire(); + if (tls->ready == 42) { + tls->next->prev = tls->prev; + tls->prev->next = tls->next; + memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + tls->ready = 0; + } + _RPython_ThreadLocals_Release(); +#ifndef USE___THREAD + free(p); +#endif +} -/* ------------------------------------------------------------ */ -#ifdef USE___THREAD -/* ------------------------------------------------------------ */ +#ifdef _WIN32 +/* xxx Defines a DllMain() function. It's horrible imho: it only + works if we happen to compile a DLL (not a EXE); and of course you + get link-time errors if two files in the same DLL do the same. + There are some alternatives known, but they are horrible in other + ways (e.g. using undocumented behavior). This seems to be the + simplest, but feel free to fix if you need that. - -/* in this situation, we always have one full 'struct pypy_threadlocal_s' - available, managed by gcc. */ -__thread struct pypy_threadlocal_s pypy_threadlocal; + For this reason we have the line 'not _win32 or config.translation.shared' + in rpython.rlib.rthread. +*/ +BOOL WINAPI DllMain(HINSTANCE hinstDLL, + DWORD reason_for_call, + LPVOID reserved) +{ + LPVOID p; + switch (reason_for_call) { + case DLL_THREAD_DETACH: + if (pypy_threadlocal_key != TLS_OUT_OF_INDEXES) { + p = TlsGetValue(pypy_threadlocal_key); + if (p != NULL) { + TlsSetValue(pypy_threadlocal_key, NULL); + threadloc_unlink(p); + } + } + break; + default: + break; + } + return TRUE; +} +#endif void RPython_ThreadLocals_ProgramInit(void) { - _RPy_ThreadLocals_Init(&pypy_threadlocal); -} - -char *_RPython_ThreadLocals_Build(void) -{ - RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); - _RPy_ThreadLocals_Init(&pypy_threadlocal); - return (char *)&pypy_threadlocal; -} - -void RPython_ThreadLocals_ThreadDie(void) -{ - memset(&pypy_threadlocal, 0xDD, - sizeof(struct pypy_threadlocal_s)); /* debug */ - pypy_threadlocal.ready = 0; -} - - -/* ------------------------------------------------------------ */ -#else -/* ------------------------------------------------------------ */ - - -/* this is the case where the 'struct pypy_threadlocal_s' is allocated - explicitly, with malloc()/free(), and attached to (a single) thread- - local key using the API of Windows or pthread. */ - -pthread_key_t pypy_threadlocal_key; - - -void RPython_ThreadLocals_ProgramInit(void) -{ + /* Initialize the pypy_threadlocal_key, together with a destructor + that will be called every time a thread shuts down (if there is + a non-null thread-local value). This is needed even in the + case where we use '__thread' below, for the destructor. + */ + assert(pypy_threadlocal_lock == 0); #ifdef _WIN32 pypy_threadlocal_key = TlsAlloc(); if (pypy_threadlocal_key == TLS_OUT_OF_INDEXES) #else - if (pthread_key_create(&pypy_threadlocal_key, NULL) != 0) + if (pthread_key_create(&pypy_threadlocal_key, threadloc_unlink) != 0) #endif { fprintf(stderr, "Internal RPython error: " @@ -85,8 +186,53 @@ abort(); } _RPython_ThreadLocals_Build(); + +#ifndef _WIN32 + pthread_atfork(_RPython_ThreadLocals_Acquire, + _RPython_ThreadLocals_Release, + cleanup_after_fork); +#endif } + +/* ------------------------------------------------------------ */ +#ifdef USE___THREAD +/* ------------------------------------------------------------ */ + + +/* in this situation, we always have one full 'struct pypy_threadlocal_s' + available, managed by gcc. */ +__thread struct pypy_threadlocal_s pypy_threadlocal; + +char *_RPython_ThreadLocals_Build(void) +{ + RPyAssert(pypy_threadlocal.ready == 0, "unclean thread-local"); + _RPy_ThreadLocals_Init(&pypy_threadlocal); + + /* we also set up &pypy_threadlocal as a POSIX thread-local variable, + because we need the destructor behavior. */ + pthread_setspecific(pypy_threadlocal_key, (void *)&pypy_threadlocal); + + return (char *)&pypy_threadlocal; +} + +void RPython_ThreadLocals_ThreadDie(void) +{ + pthread_setspecific(pypy_threadlocal_key, NULL); + threadloc_unlink(&pypy_threadlocal); +} + + +/* ------------------------------------------------------------ */ +#else +/* ------------------------------------------------------------ */ + + +/* this is the case where the 'struct pypy_threadlocal_s' is allocated + explicitly, with malloc()/free(), and attached to (a single) thread- + local key using the API of Windows or pthread. */ + + char *_RPython_ThreadLocals_Build(void) { void *p = malloc(sizeof(struct pypy_threadlocal_s)); @@ -105,8 +251,7 @@ void *p = _RPy_ThreadLocals_Get(); if (p != NULL) { _RPy_ThreadLocals_Set(NULL); - memset(p, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ - free(p); + threadloc_unlink(p); /* includes free(p) */ } } diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -13,14 +13,24 @@ to die. */ RPY_EXTERN void RPython_ThreadLocals_ThreadDie(void); -/* There are two llops: 'threadlocalref_addr' and 'threadlocalref_make'. - They both return the address of the thread-local structure (of the - C type 'struct pypy_threadlocal_s'). The difference is that - OP_THREADLOCALREF_MAKE() checks if we have initialized this thread- - local structure in the current thread, and if not, calls the following - helper. */ +/* 'threadlocalref_addr' returns the address of the thread-local + structure (of the C type 'struct pypy_threadlocal_s'). It first + checks if we have initialized this thread-local structure in the + current thread, and if not, calls the following helper. */ RPY_EXTERN char *_RPython_ThreadLocals_Build(void); +RPY_EXTERN void _RPython_ThreadLocals_Acquire(void); +RPY_EXTERN void _RPython_ThreadLocals_Release(void); + +/* Must acquire/release the thread-local lock around a series of calls + to the following function */ +RPY_EXTERN struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev); + +#define OP_THREADLOCALREF_ACQUIRE(r) _RPython_ThreadLocals_Acquire() +#define OP_THREADLOCALREF_RELEASE(r) _RPython_ThreadLocals_Release() +#define OP_THREADLOCALREF_ENUM(p, r) r = _RPython_ThreadLocals_Enum(p) + /* ------------------------------------------------------------ */ #ifdef USE___THREAD @@ -29,6 +39,8 @@ /* Use the '__thread' specifier, so far only on Linux */ +#include + RPY_EXTERN __thread struct pypy_threadlocal_s pypy_threadlocal; #define OP_THREADLOCALREF_ADDR(r) \ @@ -64,8 +76,6 @@ # define _RPy_ThreadLocals_Set(x) pthread_setspecific(pypy_threadlocal_key, x) #endif -RPY_EXTERN pthread_key_t pypy_threadlocal_key; - #define OP_THREADLOCALREF_ADDR(r) \ do { \ @@ -87,6 +97,9 @@ /* ------------------------------------------------------------ */ +RPY_EXTERN pthread_key_t pypy_threadlocal_key; + + /* only for the fall-back path in the JIT */ #define OP_THREADLOCALREF_GET_NONCONST(RESTYPE, offset, r) \ do { \ diff --git a/rpython/translator/c/test/test_boehm.py b/rpython/translator/c/test/test_boehm.py --- a/rpython/translator/c/test/test_boehm.py +++ b/rpython/translator/c/test/test_boehm.py @@ -23,6 +23,7 @@ class AbstractGCTestClass(object): gcpolicy = "boehm" use_threads = False + extra_options = {} # deal with cleanups def setup_method(self, meth): @@ -33,8 +34,10 @@ #print "CLEANUP" self._cleanups.pop()() - def getcompiled(self, func, argstypelist=[], annotatorpolicy=None): - return compile(func, argstypelist, gcpolicy=self.gcpolicy, thread=self.use_threads) + def getcompiled(self, func, argstypelist=[], annotatorpolicy=None, + extra_options={}): + return compile(func, argstypelist, gcpolicy=self.gcpolicy, + thread=self.use_threads, **extra_options) class TestUsingBoehm(AbstractGCTestClass): diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -96,6 +96,8 @@ continue if name == 'pypy_debug_file': # ok to export this one continue + if name == 'rpython_startup_code': # ok for this one too + continue if 'pypy' in name.lower() or 'rpy' in name.lower(): raise Exception("Unexpected exported name %r. " "What is likely missing is RPY_EXTERN before the " diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -203,9 +203,8 @@ try: points = secondary_entrypoints[key] except KeyError: - raise KeyError( - "Entrypoints not found. I only know the keys %r." % - (", ".join(secondary_entrypoints.keys()), )) + raise KeyError("Entrypoint %r not found (not in %r)" % + (key, secondary_entrypoints.keys())) self.secondary_entrypoints.extend(points) self.translator.driver_instrument_result = self.instrument_result From pypy.commits at gmail.com Fri Jan 8 11:25:03 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 08 Jan 2016 08:25:03 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Merge with default Message-ID: <568fe2df.9a6f1c0a.7b609.6938@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81627:6e223f0e19d6 Date: 2016-01-08 11:24 -0500 http://bitbucket.org/pypy/pypy/changeset/6e223f0e19d6/ Log: Merge with default diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': diff --git a/pypy/module/test_lib_pypy/test_cPickle.py b/pypy/module/test_lib_pypy/test_cPickle.py --- a/pypy/module/test_lib_pypy/test_cPickle.py +++ b/pypy/module/test_lib_pypy/test_cPickle.py @@ -5,3 +5,7 @@ def test_stack_underflow(): py.test.raises(cPickle.UnpicklingError, cPickle.loads, "a string") + +def test_bad_key(): + e = py.test.raises(cPickle.UnpicklingError, cPickle.loads, "v") + assert str(e.value) == "invalid load key, 'v'." diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -719,7 +719,8 @@ def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): if bytes != 8: raise Exception("gc_load_indexed_f is only for 'double'!") - return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale, base_ofs) + return llop.gc_load_indexed(longlong.FLOATSTORAGE, + struct, index, scale, base_ofs) def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -399,9 +399,7 @@ rop.GC_LOAD_I, rop.GC_LOAD_R, rop.GC_LOAD_F, - rop.GC_LOAD_INDEXED_I, rop.GC_LOAD_INDEXED_R, - rop.GC_LOAD_INDEXED_F, rop.GC_STORE, rop.GC_STORE_INDEXED, ): # list of opcodes never executed by pyjitpl diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -696,58 +696,6 @@ # ---------- - def test_virtual_1(self): - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - setfield_gc(p0, i1, descr=valuedescr) - jump(i, p0) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_float(self): - ops = """ - [f, p0] - f0 = getfield_gc(p0, descr=floatdescr) - f1 = float_add(f0, f) - setfield_gc(p0, f1, descr=floatdescr) - jump(f, p0) - """ - expected = """ - [f, f2] - f1 = float_add(f2, f) - jump(f, f1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected) - - def test_virtual_2(self): - py.test.skip("XXX") - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - p1 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p1, i1, descr=valuedescr) - jump(i, p1) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_oois(self): ops = """ [p0, p1, p2] @@ -774,20 +722,6 @@ guard_false(i12) [] jump(p0, p1, p2) """ - expected = """ - [p2] - # all constant-folded :-) - jump(p2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable), - Virtual(node_vtable), - Not''', - expected) - # - # to be complete, we also check the no-opt case where most comparisons - # are not removed. The exact set of comparisons removed depends on - # the details of the algorithm... expected2 = """ [p0, p1, p2] guard_nonnull(p0) [] @@ -801,26 +735,6 @@ """ self.optimize_loop(ops, expected2) - def test_virtual_default_field(self): - py.test.skip("XXX") - ops = """ - [p0] - i0 = getfield_gc(p0, descr=valuedescr) - guard_value(i0, 0) [] - p1 = new_with_vtable(ConstClass(node_vtable)) - # the field 'value' has its default value of 0 - jump(p1) - """ - expected = """ - [i] - guard_value(i, 0) [] - jump(0) - """ - # the 'expected' is sub-optimal, but it should be done by another later - # optimization step. See test_find_nodes_default_field() for why. - self.optimize_loop(ops, 'Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_3(self): ops = """ [i] @@ -837,55 +751,6 @@ """ self.optimize_loop(ops, expected) - def test_virtual_4(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - jump(i3, p1) - """ - expected = """ - [i0, i1] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_5(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p2 = new_with_vtable(descr=nodesize2) - setfield_gc(p2, i1, descr=valuedescr) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - setfield_gc(p1, p2, descr=nextdescr) - jump(i3, p1) - """ - expected = """ - [i0, i1, i1bis] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2, i1) - """ - self.optimize_loop(ops, - '''Not, Virtual(node_vtable, - valuedescr=Not, - nextdescr=Virtual(node_vtable2, - valuedescr=Not))''', - expected) - def test_virtual_constant_isnull(self): ops = """ [i0] @@ -1208,27 +1073,6 @@ """ self.optimize_loop(ops, expected) - def test_varray_2(self): - ops = """ - [i0, p1] - i1 = getarrayitem_gc(p1, 0, descr=arraydescr) - i2 = getarrayitem_gc(p1, 1, descr=arraydescr) - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - p2 = new_array(2, descr=arraydescr) - setarrayitem_gc(p2, 1, i0, descr=arraydescr) - setarrayitem_gc(p2, 0, 20, descr=arraydescr) - jump(i0, p2) - """ - expected = """ - [i0, i1, i2] - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - jump(i0, 20, i0) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr, Not, Not)', expected) - def test_p123_array(self): ops = """ [i1, p2, p3] @@ -1263,23 +1107,6 @@ """ self.optimize_loop(ops, expected) - def test_vstruct_1(self): - py.test.skip("XXX") - ops = """ - [i1, p2] - i2 = getfield_gc(p2, descr=adescr) - escape_n(i2) - p3 = new(descr=ssize) - setfield_gc(p3, i1, descr=adescr) - jump(i1, p3) - """ - expected = """ - [i1, i2] - escape_n(i2) - jump(i1, i1) - """ - self.optimize_loop(ops, 'Not, VStruct(ssize, adescr=Not)', expected) - def test_p123_vstruct(self): ops = """ [i1, p2, p3] @@ -1442,26 +1269,6 @@ """ self.optimize_loop(ops, expected) - def test_duplicate_getfield_guard_value_const(self): - ops = """ - [p1] - guard_value(p1, ConstPtr(myptr)) [] - i1 = getfield_gc_i(p1, descr=valuedescr) - i2 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i2) - jump(p1) - """ - expected = """ - [] - i1 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i1) - jump() - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr)', expected) - def test_duplicate_getfield_sideeffects_1(self): ops = """ [p1] @@ -1687,12 +1494,12 @@ jump(p1, i1, i2) """ expected = """ - [i1, i2] + [p1, i1, i2] + guard_value(p1, ConstPtr(myptr)) [] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) - jump(i1, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr), Not, Not', expected) + jump(ConstPtr(myptr), i1, i2) + """ + self.optimize_loop(ops, expected) def test_duplicate_getarrayitem_1(self): ops = """ @@ -1869,163 +1676,7 @@ """ self.optimize_loop(ops, expected) - def test_bug_1(self): - ops = """ - [i0, p1] - p4 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_with_vtable(descr=nodesize) - p3 = escape_r() - setfield_gc(p2, p3, descr=nextdescr) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, nextdescr=Not)', - expected) - - def test_bug_2(self): - ops = """ - [i0, p1] - p4 = getarrayitem_gc(p1, 0, descr=arraydescr2) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_array(1, descr=arraydescr2) - p3 = escape_r() - setarrayitem_gc(p2, 0, p3, descr=arraydescr2) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr2, Not)', - expected) - - def test_bug_3(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - p2a = new_with_vtable(descr=nodesize) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_bug_3bis(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_invalid_loop_1(self): - ops = """ - [p1] - guard_isnull(p1) [] - # - p2 = new_with_vtable(descr=nodesize) - jump(p2) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, - ops, 'Virtual(node_vtable)', None) - - def test_invalid_loop_2(self): - py.test.skip("this would fail if we had Fixed again in the specnodes") - ops = """ - [p1] - guard_class(p1, ConstClass(node_vtable2)) [] - # - p2 = new_with_vtable(descr=nodesize) - escape_n(p2) # prevent it from staying Virtual - jump(p2) - """ - py.test.raises(InvalidLoop, self.optimize_loop, - ops, '...', None) - - def test_invalid_loop_3(self): - ops = """ - [p1] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_isnull(p2) [] - # - p3 = new_with_vtable(descr=nodesize) - p4 = new_with_vtable(descr=nodesize) - setfield_gc(p3, p4, descr=nextdescr) - jump(p3) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, ops, - 'Virtual(node_vtable, nextdescr=Virtual(node_vtable))', - None) - def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -2059,7 +1710,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2077,7 +1727,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2624,26 +2273,6 @@ where p2 is a node_vtable, valuedescr=i2, nextdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_6(self): - ops = """ - [p0, i0, i1] - guard_true(i0) [p0] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i1, i1) - """ - expected = """ - [i1b, i0, i1] - guard_true(i0) [i1b] - jump(i1, i1, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable, valuedescr=Not), - Not, Not''', expected) - self.check_expanded_fail_descr('''p0 - where p0 is a node_vtable, valuedescr=i1b - ''', rop.GUARD_TRUE) - def test_expand_fail_varray(self): ops = """ [i1] @@ -2685,47 +2314,6 @@ where p2 is a vstruct ssize, adescr=i1, bdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_v_all_1(self): - ops = """ - [i1, p1a, i2] - p6s = getarrayitem_gc(p1a, 0, descr=arraydescr2) - p7v = getfield_gc_r(p6s, descr=bdescr) - p5s = new(descr=ssize) - setfield_gc(p5s, i2, descr=adescr) - setfield_gc(p5s, p7v, descr=bdescr) - setarrayitem_gc(p1a, 1, p5s, descr=arraydescr2) - guard_true(i1) [p1a] - p2s = new(descr=ssize) - p3v = new_with_vtable(descr=nodesize) - p4a = new_array(2, descr=arraydescr2) - setfield_gc(p2s, i1, descr=adescr) - setfield_gc(p2s, p3v, descr=bdescr) - setfield_gc(p3v, i2, descr=valuedescr) - setarrayitem_gc(p4a, 0, p2s, descr=arraydescr2) - jump(i1, p4a, i2) - """ - expected = """ - [i1, ia, iv, pnull, i2] - guard_true(i1) [ia, iv, i2] - jump(1, 1, i2, NULL, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, ''' - Not, - VArray(arraydescr2, - VStruct(ssize, - adescr=Not, - bdescr=Virtual(node_vtable, - valuedescr=Not)), - Not), - Not''', expected) - self.check_expanded_fail_descr('''p1a - where p1a is a varray arraydescr2: p6s, p5s - where p6s is a vstruct ssize, adescr=ia, bdescr=p7v - where p5s is a vstruct ssize, adescr=i2, bdescr=p7v - where p7v is a node_vtable, valuedescr=iv - ''', rop.GUARD_TRUE) - def test_expand_fail_lazy_setfield_1(self): ops = """ [p1, i2, i3] @@ -5178,6 +4766,8 @@ """ self.optimize_loop(ops, expected) + def test_intmod_bounds_harder(self): + py.test.skip("harder") # Of course any 'maybe-negative % power-of-two' can be turned into # int_and(), but that's a bit harder to detect here because it turns # into several operations, and of course it is wrong to just turn @@ -5195,7 +4785,6 @@ i4 = int_and(i0, 15) finish(i4) """ - py.test.skip("harder") self.optimize_loop(ops, expected) def test_intmod_bounds_bug1(self): @@ -5356,7 +4945,7 @@ i5 = int_lt(i2, i1) guard_true(i5) [] - i6 = getarrayitem_gc(p0, i2) + i6 = getarrayitem_gc_i(p0, i2, descr=chararraydescr) finish(i6) """ expected = """ @@ -5367,7 +4956,7 @@ i4 = int_lt(i2, i0) guard_true(i4) [] - i6 = getarrayitem_gc(p0, i3) + i6 = getarrayitem_gc_i(p0, i3, descr=chararraydescr) finish(i6) """ self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -2969,7 +2969,6 @@ assert "promote of a virtual" in exc.msg def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -3015,7 +3014,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -3039,7 +3037,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] From pypy.commits at gmail.com Fri Jan 8 11:36:55 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 08 Jan 2016 08:36:55 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: some progress on the JIT front - it's still unfinished, but we have a working test Message-ID: <568fe5a7.84e31c0a.56c09.6d4a@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81628:09a93ad62225 Date: 2016-01-08 18:36 +0200 http://bitbucket.org/pypy/pypy/changeset/09a93ad62225/ Log: some progress on the JIT front - it's still unfinished, but we have a working test diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -0,0 +1,65 @@ + +import os +from rpython.jit.backend.test.support import CCompiledMixin +from rpython.rlib.jit import JitDriver +from rpython.tool.udir import udir +from rpython.jit.backend.detect_cpu import getcpuclass + +class CompiledVmprofTest(CCompiledMixin): + CPUClass = getcpuclass() + + def test_vmprof(self): + from rpython.rlib import rvmprof + + class MyCode: + pass + def get_name(code): + return 'py:code:52:x' + try: + rvmprof.register_code_object_class(MyCode, get_name) + except rvmprof.VMProfPlatformUnsupported, e: + py.test.skip(str(e)) + + driver = JitDriver(greens = ['code'], reds = ['i', 's', 'num']) + + @rvmprof.vmprof_execute_code("xcode13", lambda code, num: code) + def main(code, num): + return main_jitted(code, num) + + def main_jitted(code, num): + s = 0 + i = 0 + while i < num: + driver.jit_merge_point(code=code, i=i, s=s, num=num) + s += (i << 1) + if s % 32423423423 and s > 0 == 0: + print s + i += 1 + return s + + tmpfilename = str(udir.join('test_rvmprof')) + + def f(num): + code = MyCode() + rvmprof.register_code(code, get_name) + fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) + period = 0.0001 + rvmprof.enable(fd, period) + res = main(code, num) + #assert res == 499999500000 + rvmprof.disable() + os.close(fd) + return 0 + + def check_vmprof_output(self): + from vmprof import read_profile + tmpfile = str(udir.join('test_rvmprof')) + stats = read_profile(tmpfile) + + self.meta_interp(f, [100000000]) + try: + import vmprof + except ImportError: + pass + else: + check_vmprof_output() \ No newline at end of file diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/test/test_rvmprof.py @@ -0,0 +1,44 @@ + +from rpython.rlib import jit +from rpython.rtyper.annlowlevel import llhelper +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rvmprof import _get_vmprof +from rpython.jit.backend.x86.arch import WORD +from rpython.jit.codewriter.policy import JitPolicy + +class BaseRVMProfTest(object): + def test_one(self): + visited = [] + + def helper(): + stackp = _get_vmprof().cintf.vmprof_address_of_global_stack()[0] + if stackp: + # not during tracing + stack = rffi.cast(rffi.CArrayPtr(lltype.Signed), stackp) + visited.append(rffi.cast(rffi.CArrayPtr(lltype.Signed), stack[1] - WORD)[0]) + else: + visited.append(0) + + llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper) + + driver = jit.JitDriver(greens=[], reds='auto') + + def f(n): + i = 0 + while i < n: + driver.jit_merge_point() + i += 1 + llfn() + + class Hooks(jit.JitHookInterface): + def after_compile(self, debug_info): + self.raw_start = debug_info.asminfo.rawstart + + hooks = Hooks() + + self.meta_interp(f, [10], policy=JitPolicy(hooks)) + v = set(visited) + assert 0 in v + v.remove(0) + assert len(v) == 1 + assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024 diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -31,7 +31,7 @@ if WORD == 4: # ebp + ebx + esi + edi + 15 extra words = 19 words - FRAME_FIXED_SIZE = 19 + FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float # 'threadlocal_addr' is passed as 2nd argument on the stack, @@ -41,7 +41,7 @@ THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD else: # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19 - FRAME_FIXED_SIZE = 19 + FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof PASS_ON_MY_FRAME = 12 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM # 'threadlocal_addr' is passed as 2nd argument in %esi, diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -12,7 +12,7 @@ from rpython.jit.metainterp.compile import ResumeGuardDescr from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref +from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rtyper import rclass from rpython.rlib.jit import AsmInfo from rpython.jit.backend.model import CompiledLoopToken @@ -40,6 +40,7 @@ from rpython.jit.codewriter import longlong from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import compute_unique_id +from rpython.rlib.rvmprof.rvmprof import _get_vmprof, VMPROF_JITTED_TAG class Assembler386(BaseAssembler, VectorAssemblerMixin): @@ -837,9 +838,23 @@ frame_depth = max(frame_depth, target_frame_depth) return frame_depth + def _call_header_vmprof(self): + stack = _get_vmprof().cintf.vmprof_address_of_global_stack() + self.mc.MOV_rr(eax.value, esp.value) + self.mc.ADD_ri(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) # er makes no sense + # next + self.mc.MOV(ecx, heap(stack)) + self.mc.MOV_mr((eax.value, 0), ecx.value) + # value + self.mc.MOV_mr((eax.value, WORD), esp.value) + # kind + self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG) + self.mc.MOV(heap(stack), eax) + def _call_header(self): self.mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) self.mc.MOV_sr(PASS_ON_MY_FRAME * WORD, ebp.value) + self._call_header_vmprof() if IS_X86_64: self.mc.MOV_sr(THREADLOCAL_OFS, esi.value) self.mc.MOV_rr(ebp.value, edi.value) diff --git a/rpython/jit/backend/x86/test/test_rvmprof.py b/rpython/jit/backend/x86/test/test_rvmprof.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/test/test_rvmprof.py @@ -0,0 +1,7 @@ + +import py +from rpython.jit.backend.test.test_rvmprof import BaseRVMProfTest +from rpython.jit.backend.x86.test.test_basic import Jit386Mixin + +class TestFfiCall(Jit386Mixin, BaseRVMProfTest): + pass \ No newline at end of file diff --git a/rpython/jit/backend/x86/test/test_zvmprof.py b/rpython/jit/backend/x86/test/test_zvmprof.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/test/test_zvmprof.py @@ -0,0 +1,7 @@ + +from rpython.jit.backend.llsupport.test.zrpy_vmprof_test import CompiledVmprofTest + +class TestZVMprof(CompiledVmprofTest): + + gcrootfinder = "shadowstack" + gc = "incminimark" \ No newline at end of file diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -56,6 +56,9 @@ [rffi.INT], lltype.Void, compilation_info=eci, _nowrapper=True) + vmprof_address_of_global_stack = rffi.llexternal( + "vmprof_address_of_global_stack", [], rffi.CArrayPtr(lltype.Signed), + compilation_info=eci) return CInterface(locals()) @@ -106,6 +109,8 @@ #include "src/precommondefs.h" #include "vmprof_stack.h" +extern vmprof_stack* vmprof_global_stack; + %(type)s %(cont_name)s(%(llargs)s); %(type)s %(tramp_name)s(%(llargs)s, long unique_id) diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -11,6 +11,12 @@ # ____________________________________________________________ +# keep in sync with vmprof_stack.h +VMPROF_CODE_TAG = 1 +VMPROF_BLACKHOLE_TAG = 2 +VMPROF_JITTED_TAG = 3 +VMPROF_JITTING_TAG = 4 +VMPROF_GC_TAG = 5 class VMProfError(Exception): def __init__(self, msg): diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -55,6 +55,15 @@ static int opened_profile(char *interp_name); static void flush_codes(void); + + +RPY_EXTERN vmprof_stack* vmprof_global_stack; + +RPY_EXTERN void *vmprof_address_of_global_stack(void) +{ + return (void*)&vmprof_global_stack; +} + RPY_EXTERN char *vmprof_init(int fd, double interval, char *interp_name) { diff --git a/rpython/rlib/rvmprof/src/vmprof_stack.h b/rpython/rlib/rvmprof/src/vmprof_stack.h --- a/rpython/rlib/rvmprof/src/vmprof_stack.h +++ b/rpython/rlib/rvmprof/src/vmprof_stack.h @@ -16,10 +16,3 @@ // to worry too much. There is a potential for squeezing it with bit // patterns into one WORD, but I don't want to care RIGHT NOW, potential // for future optimization potential - -RPY_EXTERN vmprof_stack* vmprof_global_stack; - -RPY_EXTERN void *vmprof_address_of_global_stack(void) -{ - return (void*)&vmprof_global_stack; -} From pypy.commits at gmail.com Fri Jan 8 12:06:15 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 08 Jan 2016 09:06:15 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Give up testing windows: it kinda seems to work but I really, really, Message-ID: <568fec87.82df1c0a.7469f.7804@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2559:9430cc490818 Date: 2016-01-08 18:05 +0100 http://bitbucket.org/cffi/cffi/changeset/9430cc490818/ Log: Give up testing windows: it kinda seems to work but I really, really, really don't manage to make the tests pass. Call for help (we can be optimistic) diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -4,6 +4,10 @@ from testing.udir import udir import cffi +if sys.platform == 'win32': + py.test.skip("it 'should' work on Windows, but I did not manage at all" + " to make these tests pass. Please help") + local_dir = os.path.dirname(os.path.abspath(__file__)) _link_error = '?' From pypy.commits at gmail.com Fri Jan 8 13:09:57 2016 From: pypy.commits at gmail.com (jonaspf) Date: Fri, 08 Jan 2016 10:09:57 -0800 (PST) Subject: [pypy-commit] pypy default: Add support for floats as parameters to itertools.islice. Improves compatibility with CPython. Message-ID: <568ffb75.863f1c0a.ff9da.1382@mx.google.com> Author: Jonas Pfannschmidt Branch: Changeset: r81629:8ba0a03b22db Date: 2016-01-08 12:22 +0000 http://bitbucket.org/pypy/pypy/changeset/8ba0a03b22db/ Log: Add support for floats as parameters to itertools.islice. Improves compatibility with CPython. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -372,7 +372,7 @@ def arg_int_w(self, w_obj, minimum, errormsg): space = self.space try: - result = space.int_w(w_obj) + result = space.int_w(space.int(w_obj)) # CPython allows floats as parameters except OperationError, e: if e.async(space): raise diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -225,6 +225,12 @@ assert it.next() == x raises(StopIteration, it.next) + # CPython implementation allows floats + it = itertools.islice([1, 2, 3, 4, 5], 0.0, 3.0, 2.0) + for x in [1, 3]: + assert it.next() == x + raises(StopIteration, it.next) + it = itertools.islice([1, 2, 3], 0, None) for x in [1, 2, 3]: assert it.next() == x From pypy.commits at gmail.com Fri Jan 8 17:57:37 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 08 Jan 2016 14:57:37 -0800 (PST) Subject: [pypy-commit] pypy default: kill unused methods Message-ID: <56903ee1.0f811c0a.80258.6532@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81630:cee9c69134f4 Date: 2016-01-08 18:44 +0100 http://bitbucket.org/pypy/pypy/changeset/cee9c69134f4/ Log: kill unused methods diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -249,12 +249,6 @@ d.produce_potential_short_preamble_ops(self.optimizer, sb, descr, index) - def register_dirty_field(self, descr, op, info): - self.field_cache(descr).register_dirty_field(op, info) - - def register_dirty_array_field(self, arraydescr, op, index, info): - self.arrayitem_cache(arraydescr, index).register_dirty_field(op, info) - def clean_caches(self): del self._lazy_setfields_and_arrayitems[:] items = self.cached_fields.items() From pypy.commits at gmail.com Fri Jan 8 17:57:38 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 08 Jan 2016 14:57:38 -0800 (PST) Subject: [pypy-commit] pypy default: remove some unused remnants from before the big refactoring Message-ID: <56903ee2.08e11c0a.ce9e2.6af9@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81631:7710475c1031 Date: 2016-01-08 18:55 +0100 http://bitbucket.org/pypy/pypy/changeset/7710475c1031/ Log: remove some unused remnants from before the big refactoring diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -39,7 +39,6 @@ self.cached_infos = [] self.cached_structs = [] self._lazy_setfield = None - self._lazy_setfield_registered = False def register_dirty_field(self, structop, info): self.cached_structs.append(structop) @@ -87,11 +86,8 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield, and register - # myself in the optheap's _lazy_setfields_and_arrayitems list + # common case: store the 'op' as lazy_setfield self._lazy_setfield = op - #if not self._lazy_setfield_registered: - # self._lazy_setfield_registered = True else: # this is the case where the pending setfield ends up @@ -201,15 +197,11 @@ self.postponed_op = None - # XXXX the rest is old - # cached array items: {array descr: {index: CachedField}} - #self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} # cache of corresponding {array descrs: dict 'entries' field descr} self.corresponding_array_descrs = {} # - self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False @@ -250,7 +242,6 @@ descr, index) def clean_caches(self): - del self._lazy_setfields_and_arrayitems[:] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) From pypy.commits at gmail.com Fri Jan 8 17:57:40 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 08 Jan 2016 14:57:40 -0800 (PST) Subject: [pypy-commit] pypy default: rename register_dirty_field to register_info. document and check its invariant. Message-ID: <56903ee4.cf0b1c0a.91856.6837@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81632:5017455f828c Date: 2016-01-08 20:12 +0100 http://bitbucket.org/pypy/pypy/changeset/5017455f828c/ Log: rename register_dirty_field to register_info. document and check its invariant. diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -40,7 +40,11 @@ self.cached_structs = [] self._lazy_setfield = None - def register_dirty_field(self, structop, info): + def register_info(self, structop, info): + # invariant: every struct or array ptr info, that is not virtual and + # that has a non-None entry at + # info._fields[descr.get_index()] + # must be in cache_infos self.cached_structs.append(structop) self.cached_infos.append(info) @@ -119,6 +123,9 @@ def _getfield(self, opinfo, descr, optheap, true_force=True): res = opinfo.getfield(descr, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.AbstractStructPtrInfo): + assert opinfo in self.cached_infos if isinstance(res, PreambleOp): if not true_force: return res.op @@ -166,6 +173,9 @@ def _getfield(self, opinfo, descr, optheap, true_force=True): res = opinfo.getitem(descr, self.index, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.ArrayPtrInfo): + assert opinfo in self.cached_infos if (isinstance(res, PreambleOp) and optheap.optimizer.cpu.supports_guard_gc_type): if not true_force: diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -203,7 +203,7 @@ if cf is not None: assert not self.is_virtual() assert struct is not None - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) def getfield(self, descr, optheap=None): self.init_fields(descr.get_parent_descr(), descr.get_index()) @@ -531,7 +531,7 @@ self._items[index] = op if cf is not None: assert not self.is_virtual() - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) def getitem(self, descr, index, optheap=None): if self._items is None or index >= len(self._items): From pypy.commits at gmail.com Fri Jan 8 17:57:42 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 08 Jan 2016 14:57:42 -0800 (PST) Subject: [pypy-commit] pypy default: rename the really generic _getvalue to _get_rhs_from_set_op Message-ID: <56903ee6.d7bc1c0a.66b62.694e@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81633:f770507d0b18 Date: 2016-01-08 19:18 +0100 http://bitbucket.org/pypy/pypy/changeset/f770507d0b18/ Log: rename the really generic _getvalue to _get_rhs_from_set_op diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -75,7 +75,7 @@ def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. structinfo = optheap.ensure_ptr_info_arg0(op) - arg1 = optheap.get_box_replacement(self._getvalue(op)) + arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): self.force_lazy_setfield(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) @@ -111,14 +111,15 @@ self.force_lazy_setfield(optheap, descr) if self._lazy_setfield is not None: op = self._lazy_setfield - return optheap.get_box_replacement(self._getvalue(op)) + return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) if res is not None: return res.get_box_replacement() return None - def _getvalue(self, op): + def _get_rhs_from_set_op(self, op): + """ given a set(field or arrayitem) op, return the rhs argument """ return op.getarg(1) def _getfield(self, opinfo, descr, optheap, true_force=True): @@ -168,7 +169,7 @@ self.index = index CachedField.__init__(self) - def _getvalue(self, op): + def _get_rhs_from_set_op(self, op): return op.getarg(2) def _getfield(self, opinfo, descr, optheap, true_force=True): From pypy.commits at gmail.com Fri Jan 8 17:57:43 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 08 Jan 2016 14:57:43 -0800 (PST) Subject: [pypy-commit] pypy default: rename _setfield to put_field_back_to_info and document it (it's only called Message-ID: <56903ee7.ea5ec20a.cd36.1794@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81634:758c16008f01 Date: 2016-01-08 20:08 +0100 http://bitbucket.org/pypy/pypy/changeset/758c16008f01/ Log: rename _setfield to put_field_back_to_info and document it (it's only called after a lazy setfield has been forced). diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -155,11 +155,14 @@ # back in the cache: the value of this particular structure's # field. opinfo = optheap.ensure_ptr_info_arg0(op) - self._setfield(op, opinfo, optheap) + self.put_field_back_to_info(op, opinfo, optheap) elif not can_cache: self.invalidate(descr) - def _setfield(self, op, opinfo, optheap): + def put_field_back_to_info(self, op, opinfo, optheap): + """ this method is called just after a lazy setfield was ommitted. it + puts the information of the lazy setfield back into the proper cache in + the info. """ arg = optheap.get_box_replacement(op.getarg(1)) struct = optheap.get_box_replacement(op.getarg(0)) opinfo.setfield(op.getdescr(), struct, arg, optheap, self) @@ -186,7 +189,7 @@ opinfo.setitem(descr, index, None, res, optheap=optheap) return res - def _setfield(self, op, opinfo, optheap): + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(2)) struct = optheap.get_box_replacement(op.getarg(0)) opinfo.setitem(op.getdescr(), self.index, struct, arg, self, optheap) From pypy.commits at gmail.com Fri Jan 8 17:57:48 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 08 Jan 2016 14:57:48 -0800 (PST) Subject: [pypy-commit] pypy default: have consistent argument order between setfield and setitem Message-ID: <56903eec.a658c20a.49ee6.73a5@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81637:bf4390452db6 Date: 2016-01-08 21:20 +0100 http://bitbucket.org/pypy/pypy/changeset/bf4390452db6/ Log: have consistent argument order between setfield and setitem diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -143,30 +143,32 @@ # abstract methods def _get_rhs_from_set_op(self, op): + """ given a set(field or arrayitem) op, return the rhs argument """ raise NotImplementedError("abstract method") def put_field_back_to_info(self, op, opinfo, optheap): + """ this method is called just after a lazy setfield was ommitted. it + puts the information of the lazy setfield back into the proper cache in + the info. """ raise NotImplementedError("abstract method") def _getfield(self, opinfo, descr, optheap, true_force=True): raise NotImplementedError("abstract method") def invalidate(self, descr): + """ clear all the cached knowledge in the infos in self.cached_infos. + """ raise NotImplementedError("abstract method") class CachedField(AbstractCachedEntry): def _get_rhs_from_set_op(self, op): - """ given a set(field or arrayitem) op, return the rhs argument """ return op.getarg(1) def put_field_back_to_info(self, op, opinfo, optheap): - """ this method is called just after a lazy setfield was ommitted. it - puts the information of the lazy setfield back into the proper cache in - the info. """ arg = optheap.get_box_replacement(op.getarg(1)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setfield(op.getdescr(), struct, arg, optheap, self) + opinfo.setfield(op.getdescr(), struct, arg, optheap=optheap, cf=self) def _getfield(self, opinfo, descr, optheap, true_force=True): res = opinfo.getfield(descr, optheap) @@ -177,7 +179,7 @@ if not true_force: return res.op res = optheap.optimizer.force_op_from_preamble(res) - opinfo.setfield(descr, None, res, optheap) + opinfo.setfield(descr, None, res, optheap=optheap) return res def invalidate(self, descr): @@ -213,7 +215,7 @@ def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(2)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setitem(op.getdescr(), self.index, struct, arg, self, optheap) + opinfo.setitem(op.getdescr(), self.index, struct, arg, optheap=optheap, cf=self) def invalidate(self, descr): for opinfo in self.cached_infos: @@ -261,7 +263,7 @@ descrkeys = self.cached_fields.keys() if not we_are_translated(): # XXX Pure operation of boxes that are cached in several places will - # only be removed from the peeled loop when red from the first + # only be removed from the peeled loop when read from the first # place discovered here. This is far from ideal, as it makes # the effectiveness of our optimization a bit random. It should # howevere always generate correct results. For tests we dont @@ -509,7 +511,7 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_setfield(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): @@ -523,7 +525,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, self, cf) + structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I @@ -574,12 +576,12 @@ # default case: produce the operation self.make_nonnull(op.getarg(0)) self.emit_operation(op) - # the remember the result of reading the array item + # then remember the result of reading the array item if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), - self.get_box_replacement(op), cf, - self) + self.get_box_replacement(op), optheap=self, + cf=cf) optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -524,7 +524,7 @@ optforce.emit_operation(setop) optforce.pure_from_args(rop.ARRAYLEN_GC, [op], ConstInt(len(self._items))) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): @@ -700,13 +700,13 @@ info = self._get_array_info(descr, optheap) return info.getitem(descr, index) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): info = self._get_array_info(descr, optheap) - info.setitem(descr, index, struct, op, cf) + info.setitem(descr, index, struct, op, optheap=optheap, cf=cf) def setfield(self, fielddescr, struct, op, optheap=None, cf=None): info = self._get_info(fielddescr.get_parent_descr(), optheap) - info.setfield(fielddescr, struct, op, optheap, cf) + info.setfield(fielddescr, struct, op, optheap=optheap, cf=cf) def is_null(self): return not bool(self._const.getref_base()) diff --git a/rpython/jit/metainterp/optimizeopt/shortpreamble.py b/rpython/jit/metainterp/optimizeopt/shortpreamble.py --- a/rpython/jit/metainterp/optimizeopt/shortpreamble.py +++ b/rpython/jit/metainterp/optimizeopt/shortpreamble.py @@ -81,7 +81,7 @@ assert index >= 0 cf = optheap.arrayitem_cache(descr, index) opinfo.setitem(self.getfield_op.getdescr(), index, self.res, - pop, cf, optheap=optheap) + pop, optheap, cf) def repr(self, memo): return "HeapOp(%s, %s)" % (self.res.repr(memo), From pypy.commits at gmail.com Fri Jan 8 17:57:45 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 08 Jan 2016 14:57:45 -0800 (PST) Subject: [pypy-commit] pypy default: reduce the confusion of everything being name "descr" by renaming a few "descr" Message-ID: <56903ee9.08e11c0a.ce9e2.6afe@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81635:0becb07b3278 Date: 2016-01-08 20:32 +0100 http://bitbucket.org/pypy/pypy/changeset/0becb07b3278/ Log: reduce the confusion of everything being name "descr" by renaming a few "descr" to "fielddescr" diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -196,28 +196,28 @@ def all_items(self): return self._fields - def setfield(self, descr, struct, op, optheap=None, cf=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) assert isinstance(op, AbstractValue) - self._fields[descr.get_index()] = op + self._fields[fielddescr.get_index()] = op if cf is not None: assert not self.is_virtual() assert struct is not None cf.register_info(struct, self) - def getfield(self, descr, optheap=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) - return self._fields[descr.get_index()] + def getfield(self, fielddescr, optheap=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) + return self._fields[fielddescr.get_index()] def _force_elements(self, op, optforce, descr): if self._fields is None: return - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETFIELD_GC, [op, subbox], - descr=flddescr) + descr=fielddescr) self._fields[i] = None optforce.emit_operation(setfieldop) @@ -249,16 +249,16 @@ if fieldinfo and fieldinfo.is_virtual(): fieldinfo.visitor_walk_recursive(op, visitor, optimizer) - def produce_short_preamble_ops(self, structbox, descr, index, optimizer, + def produce_short_preamble_ops(self, structbox, fielddescr, index, optimizer, shortboxes): if self._fields is None: return - if descr.get_index() >= len(self._fields): + if fielddescr.get_index() >= len(self._fields): # we don't know about this item return - op = optimizer.get_box_replacement(self._fields[descr.get_index()]) - opnum = OpHelpers.getfield_for_descr(descr) - getfield_op = ResOperation(opnum, [structbox], descr=descr) + op = optimizer.get_box_replacement(self._fields[fielddescr.get_index()]) + opnum = OpHelpers.getfield_for_descr(fielddescr) + getfield_op = ResOperation(opnum, [structbox], descr=fielddescr) shortboxes.add_heap_op(op, getfield_op) def _is_immutable_and_filled_with_constants(self, optimizer, memo=None): @@ -294,12 +294,12 @@ return True def _force_elements_immutable(self, descr, constptr, optforce): - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] subbox = optforce.force_box(fld) assert isinstance(subbox, Const) execute(optforce.optimizer.cpu, None, rop.SETFIELD_GC, - flddescr, constptr, subbox) + fielddescr, constptr, subbox) class InstancePtrInfo(AbstractStructPtrInfo): _attrs_ = ('_known_class',) @@ -505,6 +505,7 @@ info._items = self._items[:] def _force_elements(self, op, optforce, descr): + # XXX descr = op.getdescr() const = optforce.new_const_item(self.descr) for i in range(self.length): @@ -626,13 +627,13 @@ i = 0 fielddescrs = op.getdescr().get_all_fielddescrs() for index in range(self.length): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: fld = self._items[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETINTERIORFIELD_GC, [op, ConstInt(index), subbox], - descr=flddescr) + descr=fielddescr) optforce.emit_operation(setfieldop) # heapcache does not work for interiorfields # if it does, we would need a fix here @@ -645,7 +646,7 @@ fielddescrs = self.descr.get_all_fielddescrs() i = 0 for index in range(self.getlength()): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: itemop = self._items[i] if (itemop is not None and not isinstance(itemop, Const)): @@ -691,9 +692,9 @@ optheap.const_infos[ref] = info return info - def getfield(self, descr, optheap=None): - info = self._get_info(descr.get_parent_descr(), optheap) - return info.getfield(descr) + def getfield(self, fielddescr, optheap=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + return info.getfield(fielddescr) def getitem(self, descr, index, optheap=None): info = self._get_array_info(descr, optheap) @@ -703,9 +704,9 @@ info = self._get_array_info(descr, optheap) info.setitem(descr, index, struct, op, cf) - def setfield(self, descr, struct, op, optheap=None, cf=None): - info = self._get_info(descr.get_parent_descr(), optheap) - info.setfield(descr, struct, op, optheap, cf) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + info.setfield(fielddescr, struct, op, optheap, cf) def is_null(self): return not bool(self._const.getref_base()) From pypy.commits at gmail.com Fri Jan 8 17:57:47 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 08 Jan 2016 14:57:47 -0800 (PST) Subject: [pypy-commit] pypy default: put the really shared method into an abstract base class Message-ID: <56903eeb.8205c20a.c5c42.ffffa1d6@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81636:67211e59dc29 Date: 2016-01-08 21:14 +0100 http://bitbucket.org/pypy/pypy/changeset/67211e59dc29/ Log: put the really shared method into an abstract base class diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -21,7 +21,10 @@ pass -class CachedField(object): +class AbstractCachedEntry(object): + """ abstract base class abstracting over the difference between caching + struct fields and array items. """ + def __init__(self): # Cache information for a field descr, or for an (array descr, index) # pair. It can be in one of two states: @@ -48,13 +51,6 @@ self.cached_structs.append(structop) self.cached_infos.append(info) - def invalidate(self, descr): - for opinfo in self.cached_infos: - assert isinstance(opinfo, info.AbstractStructPtrInfo) - opinfo._fields[descr.get_index()] = None - self.cached_infos = [] - self.cached_structs = [] - def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): assert self._lazy_setfield is None @@ -118,22 +114,6 @@ return res.get_box_replacement() return None - def _get_rhs_from_set_op(self, op): - """ given a set(field or arrayitem) op, return the rhs argument """ - return op.getarg(1) - - def _getfield(self, opinfo, descr, optheap, true_force=True): - res = opinfo.getfield(descr, optheap) - if not we_are_translated() and res: - if isinstance(opinfo, info.AbstractStructPtrInfo): - assert opinfo in self.cached_infos - if isinstance(res, PreambleOp): - if not true_force: - return res.op - res = optheap.optimizer.force_op_from_preamble(res) - opinfo.setfield(descr, None, res, optheap) - return res - def force_lazy_setfield(self, optheap, descr, can_cache=True): op = self._lazy_setfield if op is not None: @@ -159,6 +139,27 @@ elif not can_cache: self.invalidate(descr) + + # abstract methods + + def _get_rhs_from_set_op(self, op): + raise NotImplementedError("abstract method") + + def put_field_back_to_info(self, op, opinfo, optheap): + raise NotImplementedError("abstract method") + + def _getfield(self, opinfo, descr, optheap, true_force=True): + raise NotImplementedError("abstract method") + + def invalidate(self, descr): + raise NotImplementedError("abstract method") + + +class CachedField(AbstractCachedEntry): + def _get_rhs_from_set_op(self, op): + """ given a set(field or arrayitem) op, return the rhs argument """ + return op.getarg(1) + def put_field_back_to_info(self, op, opinfo, optheap): """ this method is called just after a lazy setfield was ommitted. it puts the information of the lazy setfield back into the proper cache in @@ -167,10 +168,30 @@ struct = optheap.get_box_replacement(op.getarg(0)) opinfo.setfield(op.getdescr(), struct, arg, optheap, self) -class ArrayCachedField(CachedField): + def _getfield(self, opinfo, descr, optheap, true_force=True): + res = opinfo.getfield(descr, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.AbstractStructPtrInfo): + assert opinfo in self.cached_infos + if isinstance(res, PreambleOp): + if not true_force: + return res.op + res = optheap.optimizer.force_op_from_preamble(res) + opinfo.setfield(descr, None, res, optheap) + return res + + def invalidate(self, descr): + for opinfo in self.cached_infos: + assert isinstance(opinfo, info.AbstractStructPtrInfo) + opinfo._fields[descr.get_index()] = None + self.cached_infos = [] + self.cached_structs = [] + + +class ArrayCachedItem(AbstractCachedEntry): def __init__(self, index): self.index = index - CachedField.__init__(self) + AbstractCachedEntry.__init__(self) def _get_rhs_from_set_op(self, op): return op.getarg(2) @@ -284,7 +305,7 @@ try: cf = submap[index] except KeyError: - cf = submap[index] = ArrayCachedField(index) + cf = submap[index] = ArrayCachedItem(index) return cf def emit_operation(self, op): From pypy.commits at gmail.com Sat Jan 9 05:18:17 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 09 Jan 2016 02:18:17 -0800 (PST) Subject: [pypy-commit] pypy default: cffi_imports depends on pypy-c being built first Message-ID: <5690de69.10581c0a.5e89d.ffffee3e@mx.google.com> Author: Armin Rigo Branch: Changeset: r81638:bb701d0fa4d3 Date: 2016-01-09 11:17 +0100 http://bitbucket.org/pypy/pypy/changeset/bb701d0fa4d3/ Log: cffi_imports depends on pypy-c being built first diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -39,5 +39,5 @@ # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html -cffi_imports: +cffi_imports: pypy-c PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py From pypy.commits at gmail.com Sat Jan 9 08:07:51 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 09 Jan 2016 05:07:51 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: improve the test and make sure we emit the footer Message-ID: <56910627.e16ec20a.f0264.fffff527@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81639:b14e3b107fdf Date: 2016-01-09 15:07 +0200 http://bitbucket.org/pypy/pypy/changeset/b14e3b107fdf/ Log: improve the test and make sure we emit the footer diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py --- a/rpython/jit/backend/test/test_rvmprof.py +++ b/rpython/jit/backend/test/test_rvmprof.py @@ -36,9 +36,12 @@ hooks = Hooks() + stackp = _get_vmprof().cintf.vmprof_address_of_global_stack() + stackp[0] = 0 # make it empty self.meta_interp(f, [10], policy=JitPolicy(hooks)) v = set(visited) assert 0 in v v.remove(0) assert len(v) == 1 assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024 + assert stackp[0] == 0 # make sure we didn't leave anything dangling diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -851,6 +851,13 @@ self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG) self.mc.MOV(heap(stack), eax) + def _call_footer_vmprof(self): + stack = _get_vmprof().cintf.vmprof_address_of_global_stack() + # *stack = stack->next + self.mc.MOV(eax, heap(stack)) + self.mc.MOV_rm(eax.value, (eax.value, 0)) + self.mc.MOV(heap(stack), eax) + def _call_header(self): self.mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) self.mc.MOV_sr(PASS_ON_MY_FRAME * WORD, ebp.value) @@ -888,6 +895,7 @@ def _call_footer(self): # the return value is the jitframe + self._call_footer_vmprof() self.mc.MOV_rr(eax.value, ebp.value) gcrootmap = self.cpu.gc_ll_descr.gcrootmap From pypy.commits at gmail.com Sat Jan 9 12:12:24 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 09 Jan 2016 09:12:24 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: kill empty lines at the start and dedent manually, to give Message-ID: <56913f78.11181c0a.17ceb.6a4c@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2560:f35b3b8fe109 Date: 2016-01-09 18:12 +0100 http://bitbucket.org/cffi/cffi/changeset/f35b3b8fe109/ Log: kill empty lines at the start and dedent manually, to give reasonable-looking line numbers in tracebacks diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -650,14 +650,27 @@ def embedding_init_code(self, pysource): if self._embedding_init_code is not None: raise ValueError("embedding_init_code() can only be called once") - # check for SyntaxErrors, at least, and automatically add a - # "if 1:" line in front of the code if the whole pysource is - # indented - try: - compile(pysource, "cffi_init", "exec") - except IndentationError: - pysource = 'if 1:\n' + pysource - compile(pysource, "cffi_init", "exec") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # self._embedding_init_code = pysource From pypy.commits at gmail.com Sat Jan 9 12:24:33 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 09 Jan 2016 09:24:33 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Maybe it's clearer this way, with an API that matches the intent rather Message-ID: <56914251.8a58c20a.e6950.7d7a@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2561:968eeac942dc Date: 2016-01-09 18:24 +0100 http://bitbucket.org/cffi/cffi/changeset/968eeac942dc/ Log: Maybe it's clearer this way, with an API that matches the intent rather than how the implementation piggy-backs on ``extern "Python"`` diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -74,7 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None - self._embedding_init_code = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -94,7 +94,7 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False, packed=False, dllexport=False): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. @@ -102,14 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed, - dllexport=dllexport) + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -648,7 +655,7 @@ return result def embedding_init_code(self, pysource): - if self._embedding_init_code is not None: + if self._embedding: raise ValueError("embedding_init_code() can only be called once") # fix 'pysource' before it gets dumped into the C file: # - remove empty lines at the beginning, so it starts at "line 1" @@ -671,7 +678,7 @@ # compile(pysource, "cffi_init", "exec") # - self._embedding_init_code = pysource + self._embedding = pysource def _load_backend_lib(backend, name, flags): diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -374,11 +374,10 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._inside_extern_python: - if self._options['dllexport']: - tag = 'dllexport_python ' - else: - tag = 'extern_python ' + if self._options['dllexport']: + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' else: tag = 'function ' self._declare(tag + decl.name, tp) diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -282,13 +282,13 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # - # if we have ffi._embedding_init_code, we give it here as a macro + # if we have ffi._embedding != None, we give it here as a macro # and include an extra file base_module_name = self.module_name.split('.')[-1] - if self.ffi._embedding_init_code is not None: + if self.ffi._embedding is not None: prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % - (self._string_literal(self.ffi._embedding_init_code),)) + (self._string_literal(self.ffi._embedding),)) prnt('#ifdef PYPY_VERSION') prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( base_module_name,)) @@ -1365,7 +1365,7 @@ if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: - if ffi._embedding_init_code is not None: + if ffi._embedding is not None: ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, diff --git a/demo/embedding.py b/demo/embedding.py --- a/demo/embedding.py +++ b/demo/embedding.py @@ -2,11 +2,9 @@ ffi = cffi.FFI() -ffi.cdef(""" - extern "Python" { - int add(int, int); - } -""", dllexport=True) +ffi.embedding_api(""" + int add(int, int); +""") ffi.embedding_init_code(""" from _embedding_cffi import ffi diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py --- a/testing/embedding/add1.py +++ b/testing/embedding/add1.py @@ -2,9 +2,9 @@ ffi = cffi.FFI() -ffi.cdef(""" - extern "Python" int add1(int, int); -""", dllexport=True) +ffi.embedding_api(""" + int add1(int, int); +""") ffi.embedding_init_code(r""" import sys, time diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py --- a/testing/embedding/add2.py +++ b/testing/embedding/add2.py @@ -2,9 +2,9 @@ ffi = cffi.FFI() -ffi.cdef(""" - extern "Python" int add2(int, int, int); -""", dllexport=True) +ffi.embedding_api(""" + int add2(int, int, int); +""") ffi.embedding_init_code(r""" import sys diff --git a/testing/embedding/add3.py b/testing/embedding/add3.py --- a/testing/embedding/add3.py +++ b/testing/embedding/add3.py @@ -2,9 +2,9 @@ ffi = cffi.FFI() -ffi.cdef(""" - extern "Python" int add3(int, int, int, int); -""", dllexport=True) +ffi.embedding_api(""" + int add3(int, int, int, int); +""") ffi.embedding_init_code(r""" from _add3_cffi import ffi diff --git a/testing/embedding/add_recursive.py b/testing/embedding/add_recursive.py --- a/testing/embedding/add_recursive.py +++ b/testing/embedding/add_recursive.py @@ -2,10 +2,10 @@ ffi = cffi.FFI() -ffi.cdef(""" +ffi.embedding_api(""" int (*my_callback)(int); - extern "Python" int add_rec(int, int); -""", dllexport=True) + int add_rec(int, int); +""") ffi.embedding_init_code(r""" from _add_recursive_cffi import ffi, lib diff --git a/testing/embedding/perf.py b/testing/embedding/perf.py --- a/testing/embedding/perf.py +++ b/testing/embedding/perf.py @@ -2,9 +2,9 @@ ffi = cffi.FFI() -ffi.cdef(""" - extern "Python" int add1(int, int); -""", dllexport=True) +ffi.embedding_api(""" + int add1(int, int); +""") ffi.embedding_init_code(r""" from _perf_cffi import ffi diff --git a/testing/embedding/tlocal.py b/testing/embedding/tlocal.py --- a/testing/embedding/tlocal.py +++ b/testing/embedding/tlocal.py @@ -2,9 +2,9 @@ ffi = cffi.FFI() -ffi.cdef(""" - extern "Python" int add1(int, int); -""", dllexport=True) +ffi.embedding_api(""" + int add1(int, int); +""") ffi.embedding_init_code(r""" from _tlocal_cffi import ffi From pypy.commits at gmail.com Sat Jan 9 12:48:34 2016 From: pypy.commits at gmail.com (mjacob) Date: Sat, 09 Jan 2016 09:48:34 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Remove parts of the llvmgcroot implementation. Message-ID: <569147f2.ea5ec20a.cd36.2081@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81640:efa71b41fec2 Date: 2016-01-05 07:06 +0100 http://bitbucket.org/pypy/pypy/changeset/efa71b41fec2/ Log: Remove parts of the llvmgcroot implementation. The existing implementation uses LLVM's old GC support, which is kind of deprecated and didn't give much improvement in PyPy's case. I'm working on using LLVM's new GC support. Parts of the llvmgcroot implementation can be reused. diff --git a/rpython/memory/gctransform/llvmgcroot.py b/rpython/memory/gctransform/llvmgcroot.py --- a/rpython/memory/gctransform/llvmgcroot.py +++ b/rpython/memory/gctransform/llvmgcroot.py @@ -31,28 +31,10 @@ class LLVMGcRootFrameworkGCTransformer(BaseFrameworkGCTransformer): def push_roots(self, hop, keep_current_args=False): - livevars = self.get_livevars_for_roots(hop, keep_current_args) - self.num_pushs += len(livevars) - for k, var in enumerate(livevars): - c_k = rmodel.inputconst(lltype.Signed, k) - v_adr = gen_cast(hop.llops, llmemory.Address, var) - hop.genop("llvm_store_gcroot", [c_k, v_adr]) - return livevars + return def pop_roots(self, hop, livevars): - if not livevars: - return - if self.gcdata.gc.moving_gc: - # for moving collectors, reload the roots into the local variables - for k, var in enumerate(livevars): - c_k = rmodel.inputconst(lltype.Signed, k) - v_newaddr = hop.genop("llvm_load_gcroot", [c_k], - resulttype=llmemory.Address) - hop.genop("gc_reload_possibly_moved", [v_newaddr, var]) - for k in xrange(len(livevars)): - c_k = rmodel.inputconst(lltype.Signed, k) - c_null = rmodel.inputconst(llmemory.Address, llmemory.NULL) - hop.genop("llvm_store_gcroot", [c_k, c_null]) + return def gct_direct_call(self, hop): fnptr = hop.spaceop.args[0].value diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -574,8 +574,6 @@ # __________ for llvm.gcroot() support __________ 'llvm_gcmap': LLOp(sideeffects=False), - 'llvm_store_gcroot': LLOp(), - 'llvm_load_gcroot': LLOp(sideeffects=False), 'llvm_stack_malloc': LLOp(sideeffects=False), } # ***** Run test_lloperation after changes. ***** diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -943,7 +943,6 @@ def write_graph(self, ptr_type, name, graph, export): genllvm = database.genllvm genllvm.gcpolicy.gctransformer.inline_helpers(graph) - gcrootscount = 0 # the 'gc_reload_possibly_moved' operations make the graph not # really SSA. Fix them now. for block in graph.iterblocks(): @@ -960,12 +959,6 @@ op.args = [v_newaddr] op.result = v_newptr rename[v_targetvar] = v_newptr - elif op.opname == 'llvm_store_gcroot': - index = op.args[0].value - gcrootscount = max(gcrootscount, index+1) - elif op.opname == 'gc_stack_bottom': - database.stack_bottoms.append( - '{} {}'.format(ptr_type.repr_type(), name)) if rename: block.exitswitch = rename.get(block.exitswitch, block.exitswitch) @@ -980,6 +973,7 @@ llvmgcroot = genllvm.translator.config.translation.gcrootfinder == \ 'llvmgcroot' if llvmgcroot: + raise NotImplementedError try: prevent_inline = graph.func._gctransformer_hint_close_stack_ except AttributeError: @@ -1004,13 +998,7 @@ for block in graph.iterblocks(): self.w(self.block_to_name[block] + ':', ' ') - if block is graph.startblock: - for i in xrange(gcrootscount): - self.w('%gcroot{} = alloca i8*'.format(i)) - self.w('call void @llvm.gcroot(i8** %gcroot{}, i8* null)' - .format(i)) - self.w('store i8* null, i8** %gcroot{}'.format(i)) - else: + if block is not graph.startblock: self.write_phi_nodes(block) self.write_operations(block) self.write_branches(block) @@ -1101,14 +1089,6 @@ self.w('{result.V} = bitcast i8* @__gcmap to {result.T}' .format(**locals())) - def op_llvm_store_gcroot(self, result, index, value): - self.w('store {value.TV}, {value.T}* %gcroot{index.V}' - .format(**locals())) - - def op_llvm_load_gcroot(self, result, index): - self.w('{result.V} = load {result.T}, {result.T}* %gcroot{index.V}' - .format(**locals())) - def op_llvm_stack_malloc(self, result): type = result.type.to.repr_type() self.w('{result.V} = alloca {type}'.format(**locals())) diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -713,7 +713,7 @@ def _set_backend(cls, t): t.ensure_backend('llvm') -class TestMiniMarkGCLLVMGCRoot(test_newgc.TestMiniMarkGC): +class DisabledTestMiniMarkGCLLVMGCRoot(test_newgc.TestMiniMarkGC): @classmethod def _set_backend(cls, t): t.ensure_backend('llvm') From pypy.commits at gmail.com Sat Jan 9 12:48:39 2016 From: pypy.commits at gmail.com (mjacob) Date: Sat, 09 Jan 2016 09:48:39 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Shuffle around some code in anticipation of the new llvmgcroot implementation. Message-ID: <569147f7.a658c20a.49ee6.7d58@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81643:e58704ad4da5 Date: 2016-01-05 07:39 +0100 http://bitbucket.org/pypy/pypy/changeset/e58704ad4da5/ Log: Shuffle around some code in anticipation of the new llvmgcroot implementation. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -972,12 +972,7 @@ def prepare_graph(self, ptr_type, name, graph): genllvm = database.genllvm genllvm.gcpolicy.gctransformer.inline_helpers(graph) - self.transform_gc_reload_possibly_moved(graph) - - remove_double_links(graph) - no_links_to_startblock(graph) remove_same_as(graph) - SSI_to_SSA(graph) llvmgcroot = genllvm.translator.config.translation.gcrootfinder == \ 'llvmgcroot' @@ -989,7 +984,12 @@ prevent_inline = (name == '@rpy_walk_stack_roots' or name.startswith('@rpy_stack_check')) else: + self.transform_gc_reload_possibly_moved(graph) prevent_inline = False + + remove_double_links(graph) + no_links_to_startblock(graph) + SSI_to_SSA(graph) return prevent_inline, llvmgcroot def transform_gc_reload_possibly_moved(self, graph): diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -713,12 +713,14 @@ def _set_backend(cls, t): t.ensure_backend('llvm') + class DisabledTestMiniMarkGCLLVMGCRoot(test_newgc.TestMiniMarkGC): @classmethod def _set_backend(cls, t): t.ensure_backend('llvm') t.ensure_opt('gcrootfinder', 'llvmgcroot') + class TestMiniMarkGCMostCompactLLVM(test_newgc.TestMiniMarkGCMostCompact): @classmethod def _set_backend(cls, t): From pypy.commits at gmail.com Sat Jan 9 12:48:36 2016 From: pypy.commits at gmail.com (mjacob) Date: Sat, 09 Jan 2016 09:48:36 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Put gc_reload_possibly_moved transformation in its own method. Message-ID: <569147f4.890bc30a.30634.2095@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81641:ded16e205ad2 Date: 2016-01-05 07:14 +0100 http://bitbucket.org/pypy/pypy/changeset/ded16e205ad2/ Log: Put gc_reload_possibly_moved transformation in its own method. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -943,27 +943,7 @@ def write_graph(self, ptr_type, name, graph, export): genllvm = database.genllvm genllvm.gcpolicy.gctransformer.inline_helpers(graph) - # the 'gc_reload_possibly_moved' operations make the graph not - # really SSA. Fix them now. - for block in graph.iterblocks(): - rename = {} - for op in block.operations: - if rename: - op.args = [rename.get(v, v) for v in op.args] - if op.opname == 'gc_reload_possibly_moved': - v_newaddr, v_targetvar = op.args - assert isinstance(v_targetvar.concretetype, lltype.Ptr) - v_newptr = Variable() - v_newptr.concretetype = v_targetvar.concretetype - op.opname = 'cast_adr_to_ptr' - op.args = [v_newaddr] - op.result = v_newptr - rename[v_targetvar] = v_newptr - if rename: - block.exitswitch = rename.get(block.exitswitch, - block.exitswitch) - for link in block.exits: - link.args = [rename.get(v, v) for v in link.args] + self.transform_gc_reload_possibly_moved(graph) remove_double_links(graph) no_links_to_startblock(graph) @@ -1008,6 +988,27 @@ self.w('unreachable') self.w('}', '') + def transform_gc_reload_possibly_moved(self, graph): + for block in graph.iterblocks(): + rename = {} + for op in block.operations: + if rename: + op.args = [rename.get(v, v) for v in op.args] + if op.opname == 'gc_reload_possibly_moved': + v_newaddr, v_targetvar = op.args + assert isinstance(v_targetvar.concretetype, lltype.Ptr) + v_newptr = Variable() + v_newptr.concretetype = v_targetvar.concretetype + op.opname = 'cast_adr_to_ptr' + op.args = [v_newaddr] + op.result = v_newptr + rename[v_targetvar] = v_newptr + if rename: + block.exitswitch = rename.get(block.exitswitch, + block.exitswitch) + for link in block.exits: + link.args = [rename.get(v, v) for v in link.args] + def write_phi_nodes(self, block): for i, arg in enumerate(block.inputargs): if (arg.concretetype == lltype.Void or From pypy.commits at gmail.com Sat Jan 9 12:48:42 2016 From: pypy.commits at gmail.com (mjacob) Date: Sat, 09 Jan 2016 09:48:42 -0800 (PST) Subject: [pypy-commit] pypy default: Simplify code. Message-ID: <569147fa.aa5dc20a.74c0.429a@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81645:090cb15bfa02 Date: 2016-01-09 18:41 +0100 http://bitbucket.org/pypy/pypy/changeset/090cb15bfa02/ Log: Simplify code. The code returns False in any case. Also, the transactionsafe attribute is only ever used in the STM branch. diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py --- a/rpython/translator/backendopt/gilanalysis.py +++ b/rpython/translator/backendopt/gilanalysis.py @@ -21,12 +21,8 @@ self, graph, seen) def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj - if getattr(funcobj, 'transactionsafe', False): - return False - else: - return False - + return False + def analyze_simple_operation(self, op, graphinfo): return False From pypy.commits at gmail.com Sat Jan 9 12:48:37 2016 From: pypy.commits at gmail.com (mjacob) Date: Sat, 09 Jan 2016 09:48:37 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Move code that prepares graph before writing in its own method. Message-ID: <569147f5.ea5ec20a.cd36.2087@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81642:04404ecef89b Date: 2016-01-05 07:19 +0100 http://bitbucket.org/pypy/pypy/changeset/04404ecef89b/ Log: Move code that prepares graph before writing in its own method. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -941,26 +941,7 @@ self.lines.append('{}{}\n'.format(indent, line)) def write_graph(self, ptr_type, name, graph, export): - genllvm = database.genllvm - genllvm.gcpolicy.gctransformer.inline_helpers(graph) - self.transform_gc_reload_possibly_moved(graph) - - remove_double_links(graph) - no_links_to_startblock(graph) - remove_same_as(graph) - SSI_to_SSA(graph) - - llvmgcroot = genllvm.translator.config.translation.gcrootfinder == \ - 'llvmgcroot' - if llvmgcroot: - raise NotImplementedError - try: - prevent_inline = graph.func._gctransformer_hint_close_stack_ - except AttributeError: - prevent_inline = (name == '@rpy_walk_stack_roots' or - name.startswith('@rpy_stack_check')) - else: - prevent_inline = False + prevent_inline, llvmgcroot = self.prepare_graph(ptr_type, name, graph) self.w('define {linkage}{retvar.T} {name}({a}){add}{gc} {{'.format( linkage='' if export else 'internal ', retvar=get_repr(graph.getreturnvar()), @@ -988,6 +969,29 @@ self.w('unreachable') self.w('}', '') + def prepare_graph(self, ptr_type, name, graph): + genllvm = database.genllvm + genllvm.gcpolicy.gctransformer.inline_helpers(graph) + self.transform_gc_reload_possibly_moved(graph) + + remove_double_links(graph) + no_links_to_startblock(graph) + remove_same_as(graph) + SSI_to_SSA(graph) + + llvmgcroot = genllvm.translator.config.translation.gcrootfinder == \ + 'llvmgcroot' + if llvmgcroot: + raise NotImplementedError + try: + prevent_inline = graph.func._gctransformer_hint_close_stack_ + except AttributeError: + prevent_inline = (name == '@rpy_walk_stack_roots' or + name.startswith('@rpy_stack_check')) + else: + prevent_inline = False + return prevent_inline, llvmgcroot + def transform_gc_reload_possibly_moved(self, graph): for block in graph.iterblocks(): rename = {} From pypy.commits at gmail.com Sat Jan 9 12:48:41 2016 From: pypy.commits at gmail.com (mjacob) Date: Sat, 09 Jan 2016 09:48:41 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Remove dead code. Message-ID: <569147f9.878e1c0a.7b80.77e9@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81644:7c9fd7e467ba Date: 2016-01-05 07:43 +0100 http://bitbucket.org/pypy/pypy/changeset/7c9fd7e467ba/ Log: Remove dead code. diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -946,12 +946,6 @@ def op_llvm_gcmap(self): raise NotImplementedError("llvm_gcmap") - def op_llvm_store_gcroot(self): - raise NotImplementedError("llvm_store_gcroot") - - def op_llvm_load_gcroot(self): - raise NotImplementedError("llvm_load_gcroot") - def op_llvm_stack_malloc(self): raise NotImplementedError("llvm_stack_malloc") From pypy.commits at gmail.com Sat Jan 9 12:48:44 2016 From: pypy.commits at gmail.com (mjacob) Date: Sat, 09 Jan 2016 09:48:44 -0800 (PST) Subject: [pypy-commit] pypy default: Let GraphAnalyzer's analyze_external_call() return a more conservative result in case of a delayed pointer. Message-ID: <569147fc.6918c20a.deb73.ffffe7c7@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81646:e48142af346f Date: 2016-01-09 18:47 +0100 http://bitbucket.org/pypy/pypy/changeset/e48142af346f/ Log: Let GraphAnalyzer's analyze_external_call() return a more conservative result in case of a delayed pointer. This is consistent with what subclassses and the analyze() method (for indirect calls) are doing. diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -59,7 +59,7 @@ try: funcobj = op.args[0].value._obj except DelayedPointer: - return self.bottom_result() + return self.top_result() result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper From pypy.commits at gmail.com Sat Jan 9 14:17:57 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 09 Jan 2016 11:17:57 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Remove dead code Message-ID: <56915ce5.a5c9c20a.6cfe5.ffff88eb@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81647:6bb16ed333ea Date: 2016-01-09 19:17 +0000 http://bitbucket.org/pypy/pypy/changeset/6bb16ed333ea/ Log: Remove dead code diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -74,12 +74,6 @@ self.more_ll_values.append(link.llexitcase) elif link.exitcase is not None: mix.append(Constant(link.exitcase)) - if self.exception_policy == "CPython": - v, exc_cleanup_ops = self.graph.exc_cleanup - mix.append(v) - for cleanupop in exc_cleanup_ops: - mix.extend(cleanupop.args) - mix.append(cleanupop.result) uniquemix = [] seen = identity_dict() From pypy.commits at gmail.com Sat Jan 9 18:13:01 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 09 Jan 2016 15:13:01 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: reduce optimization level in test module compilation Message-ID: <569193fd.6351c20a.2321.fffff25b@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81648:5f7daa4980f3 Date: 2016-01-10 01:02 +0200 http://bitbucket.org/pypy/pypy/changeset/5f7daa4980f3/ Log: reduce optimization level in test module compilation diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -73,7 +73,7 @@ else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform.startswith('linux'): - kwds["compile_extra"]=["-g", "-Werror=implicit-function-declaration"] + kwds["compile_extra"]=["-O0", "-g", "-Werror=implicit-function-declaration"] modname = modname.split('.')[-1] eci = ExternalCompilationInfo( @@ -107,7 +107,7 @@ elif sys.platform == 'darwin': pass elif sys.platform.startswith('linux'): - kwds["compile_extra"]=["-g","-Werror=implicit-function-declaration"] + kwds["compile_extra"]=["-O0", "-g","-Werror=implicit-function-declaration"] modname = modname.split('.')[-1] eci = ExternalCompilationInfo( From pypy.commits at gmail.com Sat Jan 9 18:13:03 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 09 Jan 2016 15:13:03 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: copy add_docstring from numpy, add test that passes with -A Message-ID: <569193ff.cdb81c0a.ad17d.ffffd287@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81649:94e1a7e213d9 Date: 2016-01-10 01:10 +0200 http://bitbucket.org/pypy/pypy/changeset/94e1a7e213d9/ Log: copy add_docstring from numpy, add test that passes with -A diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -20,6 +20,7 @@ long long foo_longlong; unsigned long long foo_ulonglong; Py_ssize_t foo_ssizet; + PyObject * foo_docless; } fooobject; static PyTypeObject footype; @@ -184,6 +185,7 @@ {"longlong_member", T_LONGLONG, offsetof(fooobject, foo_longlong), 0, NULL}, {"ulonglong_member", T_ULONGLONG, offsetof(fooobject, foo_ulonglong), 0, NULL}, {"ssizet_member", T_PYSSIZET, offsetof(fooobject, foo_ssizet), 0, NULL}, + {"docless_member", T_OBJECT, offsetof(fooobject, foo_docless), READONLY, NULL}, {NULL} /* Sentinel */ }; @@ -623,12 +625,107 @@ (destructor)custom_dealloc, /*tp_dealloc*/ }; +static PyObject * add_docstring(PyObject * self, PyObject * args) +{ + PyObject *obj; + PyObject *str; + char *docstr; + static char *msg = "already has a docstring"; + PyObject *tp_dict = footype.tp_dict; + PyObject *myobj; + static PyTypeObject *PyMemberDescr_TypePtr = NULL; + static PyTypeObject *PyGetSetDescr_TypePtr = NULL; + static PyTypeObject *PyMethodDescr_TypePtr = NULL; + + /* Don't add docstrings */ + if (Py_OptimizeFlag > 1) { + Py_RETURN_NONE; + } + + if (PyGetSetDescr_TypePtr == NULL) { + /* Get "subdescr" */ + myobj = PyDict_GetItemString(tp_dict, "name"); + if (myobj != NULL) { + PyGetSetDescr_TypePtr = Py_TYPE(myobj); + } + } + if (PyMemberDescr_TypePtr == NULL) { + myobj = PyDict_GetItemString(tp_dict, "int_member"); + if (myobj != NULL) { + PyMemberDescr_TypePtr = Py_TYPE(myobj); + } + } + if (PyMethodDescr_TypePtr == NULL) { + myobj = PyDict_GetItemString(tp_dict, "classmeth"); + if (myobj != NULL) { + PyMethodDescr_TypePtr = Py_TYPE(myobj); + } + } + + if (!PyArg_ParseTuple(args, "OO!", &obj, &PyString_Type, &str)) { + return NULL; + } + docstr = PyString_AS_STRING(str); +#define _TESTDOC1(typebase) (Py_TYPE(obj) == &Py##typebase##_Type) +#define _TESTDOC2(typebase) (Py_TYPE(obj) == Py##typebase##_TypePtr) +#define _ADDDOC(typebase, doc, name) do { \ + Py##typebase##Object *new = (Py##typebase##Object *)obj; \ + if (!(doc)) { \ + doc = docstr; \ + } \ + else { \ + PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \ + return NULL; \ + } \ + } while (0) + + if (_TESTDOC1(CFunction)) { + _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name); + } + else if (_TESTDOC1(Type)) { + _ADDDOC(Type, new->tp_doc, new->tp_name); + } + else if (_TESTDOC2(MemberDescr)) { + _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name); + } + else if (_TESTDOC2(GetSetDescr)) { + _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); + } + else if (_TESTDOC2(MethodDescr)) { + _ADDDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); + } + else { + PyObject *doc_attr; + + doc_attr = PyObject_GetAttrString(obj, "__doc__"); + if (doc_attr != NULL && doc_attr != Py_None) { + PyErr_Format(PyExc_RuntimeError, "object %s", msg); + return NULL; + } + Py_XDECREF(doc_attr); + + if (PyObject_SetAttrString(obj, "__doc__", str) < 0) { + PyErr_SetString(PyExc_TypeError, + "Cannot set a docstring for that object"); + return NULL; + } + Py_RETURN_NONE; + } + +#undef _TESTDOC1 +#undef _TESTDOC2 +#undef _ADDDOC + + Py_INCREF(str); + Py_RETURN_NONE; +} /* List of functions exported by this module */ static PyMethodDef foo_functions[] = { {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, {"newCustom", (PyCFunction)newCustom, METH_NOARGS, NULL}, + {"add_docstring", (PyCFunction)add_docstring, METH_VARARGS, NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -55,6 +55,9 @@ raises(SystemError, "obj.broken_member = 42") assert module.fooType.broken_member.__doc__ is None assert module.fooType.object_member.__doc__ == "A Python object." + module.add_docstring(module.fooType.docless_member, "docstring for docless_member") + assert module.fooType.docless_member.__doc__ == "docstring for docless_member" + assert str(type(module.fooType.int_member)) == "" def test_typeobject_object_member(self): module = self.import_module(name='foo') From pypy.commits at gmail.com Sun Jan 10 08:27:13 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 10 Jan 2016 05:27:13 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fixes Message-ID: <56925c31.a3f6c20a.12549.63e3@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81650:17ae8014babf Date: 2016-01-09 16:23 +0200 http://bitbucket.org/pypy/pypy/changeset/17ae8014babf/ Log: fixes diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py --- a/rpython/jit/backend/test/test_rvmprof.py +++ b/rpython/jit/backend/test/test_rvmprof.py @@ -15,7 +15,8 @@ if stackp: # not during tracing stack = rffi.cast(rffi.CArrayPtr(lltype.Signed), stackp) - visited.append(rffi.cast(rffi.CArrayPtr(lltype.Signed), stack[1] - WORD)[0]) + item = rffi.cast(rffi.CArrayPtr(lltype.Signed), stack[1] - WORD)[0] + visited.append(item) else: visited.append(0) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -839,24 +839,27 @@ return frame_depth def _call_header_vmprof(self): - stack = _get_vmprof().cintf.vmprof_address_of_global_stack() + stack = rffi.cast(lltype.Signed, _get_vmprof().cintf.vmprof_address_of_global_stack()) self.mc.MOV_rr(eax.value, esp.value) self.mc.ADD_ri(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) # er makes no sense # next - self.mc.MOV(ecx, heap(stack)) + self.mc.MOV_ri(ecx.value, stack) + self.mc.MOV_rm(ecx.value, (ecx.value, 0)) self.mc.MOV_mr((eax.value, 0), ecx.value) # value self.mc.MOV_mr((eax.value, WORD), esp.value) # kind self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG) - self.mc.MOV(heap(stack), eax) + self.mc.MOV_ri(ecx.value, stack) + self.mc.MOV_mr((ecx.value, 0), eax.value) def _call_footer_vmprof(self): - stack = _get_vmprof().cintf.vmprof_address_of_global_stack() + stack = rffi.cast(lltype.Signed, _get_vmprof().cintf.vmprof_address_of_global_stack()) # *stack = stack->next - self.mc.MOV(eax, heap(stack)) + self.mc.MOV_ri(ecx.value, stack) + self.mc.MOV_rm(eax.value, (ecx.value, 0)) self.mc.MOV_rm(eax.value, (eax.value, 0)) - self.mc.MOV(heap(stack), eax) + self.mc.MOV_mr((ecx.value, 0), eax.value) def _call_header(self): self.mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) diff --git a/rpython/jit/metainterp/quasiimmut.py b/rpython/jit/metainterp/quasiimmut.py --- a/rpython/jit/metainterp/quasiimmut.py +++ b/rpython/jit/metainterp/quasiimmut.py @@ -51,6 +51,7 @@ class QuasiImmut(object): llopaque = True compress_limit = 30 + looptokens_wrefs = None def __init__(self, cpu): self.cpu = cpu @@ -75,7 +76,7 @@ def compress_looptokens_list(self): self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs if wref() is not None] - # NB. we must keep around the looptoken_wrefs that are + # NB. we must keep around the looptokens_wrefs that are # already invalidated; see below self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2 @@ -83,6 +84,9 @@ # When this is called, all the loops that we record become # invalid: all GUARD_NOT_INVALIDATED in these loops (and # in attached bridges) must now fail. + if self.looptokens_wrefs is None: + # can't happen, but helps compiled tests + return wrefs = self.looptokens_wrefs self.looptokens_wrefs = [] for wref in wrefs: From pypy.commits at gmail.com Sun Jan 10 08:27:14 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 10 Jan 2016 05:27:14 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: start working on the JIT support Message-ID: <56925c32.6918c20a.deb73.0011@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81651:00aa9b847f85 Date: 2016-01-10 15:26 +0200 http://bitbucket.org/pypy/pypy/changeset/00aa9b847f85/ Log: start working on the JIT support diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -32,7 +32,7 @@ while i < num: driver.jit_merge_point(code=code, i=i, s=s, num=num) s += (i << 1) - if s % 32423423423 and s > 0 == 0: + if s % 32423423423 == 0 and s > 0 == 0: print s i += 1 return s @@ -51,7 +51,7 @@ os.close(fd) return 0 - def check_vmprof_output(self): + def check_vmprof_output(): from vmprof import read_profile tmpfile = str(udir.join('test_rvmprof')) stats = read_profile(tmpfile) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -843,23 +843,20 @@ self.mc.MOV_rr(eax.value, esp.value) self.mc.ADD_ri(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) # er makes no sense # next - self.mc.MOV_ri(ecx.value, stack) - self.mc.MOV_rm(ecx.value, (ecx.value, 0)) + self.mc.MOV(ecx, heap(stack)) self.mc.MOV_mr((eax.value, 0), ecx.value) # value self.mc.MOV_mr((eax.value, WORD), esp.value) # kind self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG) - self.mc.MOV_ri(ecx.value, stack) - self.mc.MOV_mr((ecx.value, 0), eax.value) + self.mc.MOV(heap(stack), eax) def _call_footer_vmprof(self): stack = rffi.cast(lltype.Signed, _get_vmprof().cintf.vmprof_address_of_global_stack()) # *stack = stack->next - self.mc.MOV_ri(ecx.value, stack) - self.mc.MOV_rm(eax.value, (ecx.value, 0)) + self.mc.MOV(eax, heap(stack)) self.mc.MOV_rm(eax.value, (eax.value, 0)) - self.mc.MOV_mr((ecx.value, 0), eax.value) + self.mc.MOV(heap(stack), eax) def _call_header(self): self.mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) diff --git a/rpython/jit/backend/x86/test/test_zvmprof.py b/rpython/jit/backend/x86/test/test_zrpy_vmprof.py copy from rpython/jit/backend/x86/test/test_zvmprof.py copy to rpython/jit/backend/x86/test/test_zrpy_vmprof.py diff --git a/rpython/rlib/rvmprof/src/rvmprof.h b/rpython/rlib/rvmprof/src/rvmprof.h --- a/rpython/rlib/rvmprof/src/rvmprof.h +++ b/rpython/rlib/rvmprof/src/rvmprof.h @@ -7,4 +7,5 @@ RPY_EXTERN void* vmprof_stack_new(void); RPY_EXTERN int vmprof_stack_append(void*, long); RPY_EXTERN long vmprof_stack_pop(void*); -RPY_EXTERN void vmprof_stack_free(void*); \ No newline at end of file +RPY_EXTERN void vmprof_stack_free(void*); +RPY_EXTERN void* vmprof_address_of_global_stack(void); \ No newline at end of file diff --git a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h +++ b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h @@ -1,39 +1,32 @@ -#ifdef PYPY_JIT_CODEMAP void *pypy_find_codemap_at_addr(long addr, long *start_addr); long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); long pypy_jit_stack_depth_at_loc(long loc); -#endif -#ifdef CPYTHON_GET_CUSTOM_OFFSET -static void *tramp_start, *tramp_end; -#endif - -static long vmprof_write_header_for_jit_addr(void **result, long n, - void *ip, int max_depth) +static long vmprof_write_header_for_jit_addr(intptr_t *result, long n, + intptr_t ip, int max_depth) { -#ifdef PYPY_JIT_CODEMAP void *codemap; long current_pos = 0; intptr_t id; long start_addr = 0; intptr_t addr = (intptr_t)ip; int start, k; - void *tmp; + intptr_t tmp; codemap = pypy_find_codemap_at_addr(addr, &start_addr); - if (codemap == NULL) - // not a jit code at all + if (codemap == NULL || n >= max_depth - 2) + // not a jit code at all or almost max depth return n; // modify the last entry to point to start address and not the random one // in the middle - result[n - 1] = (void*)start_addr; - result[n] = (void*)2; - n++; - start = n; + result[n] = VMPROF_ASSEMBLER_TAG; + result[n + 1] = start_addr; + n += 2; + start = n + 2; while (n < max_depth) { id = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); if (id == -1) @@ -41,7 +34,8 @@ break; if (id == 0) continue; // not main codemap - result[n++] = (void *)id; + result[n++] = VMPROF_JITTED_TAG; + result[n++] = id; } k = 0; while (k < (n - start) / 2) { @@ -50,9 +44,5 @@ result[n - k - 1] = tmp; k++; } - if (n < max_depth) { - result[n++] = (void*)3; - } -#endif return n; } diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -134,7 +134,7 @@ } }; -void* GetPC(ucontext_t *signal_ucontext) { +intptr_t GetPC(ucontext_t *signal_ucontext) { // See comment above struct CallUnrollInfo. Only try instruction // flow matching if both eip and esp looks reasonable. const int eip = signal_ucontext->uc_mcontext.gregs[REG_EIP]; @@ -146,12 +146,12 @@ if (!memcmp(eip_char + callunrollinfo[i].pc_offset, callunrollinfo[i].ins, callunrollinfo[i].ins_size)) { // We have a match. - void **retaddr = (void**)(esp + callunrollinfo[i].return_sp_offset); + intptr_t *retaddr = (intptr_t*)(esp + callunrollinfo[i].return_sp_offset); return *retaddr; } } } - return (void*)eip; + return eip; } // Special case #2: Windows, which has to do something totally different. @@ -170,7 +170,7 @@ typedef int ucontext_t; #endif -void* GetPC(ucontext_t *signal_ucontext) { +intptr_t GetPC(ucontext_t *signal_ucontext) { RAW_LOG(ERROR, "GetPC is not yet implemented on Windows\n"); return NULL; } @@ -180,11 +180,11 @@ // the right value for your system, and add it to the list in // configure.ac (or set it manually in your config.h). #else -void* GetPC(ucontext_t *signal_ucontext) { +intptr_t GetPC(ucontext_t *signal_ucontext) { #ifdef __APPLE__ - return (void*)(signal_ucontext->uc_mcontext->__ss.__rip); + return (signal_ucontext->uc_mcontext->__ss.__rip); #else - return (void*)signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h + return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h #endif } diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -130,7 +130,7 @@ char padding[sizeof(long) - 1]; char marker; long count, depth; - void *stack[]; + intptr_t stack[]; }; static long profile_interval_usec = 0; @@ -144,13 +144,22 @@ * ************************************************************* */ -static int get_stack_trace(void **result, int max_depth, ucontext_t *ucontext) +static int get_stack_trace(intptr_t *result, int max_depth, intptr_t pc, ucontext_t *ucontext) { struct vmprof_stack* stack = vmprof_global_stack; int n = 0; + intptr_t addr = 0; + int bottom_jitted = 0; + // check if the pc is in JIT + if (pypy_find_codemap_at_addr((intptr_t)pc, &addr)) { + // the bottom part is jitted, means we can fill up the first part + // from the JIT + n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); + bottom_jitted = 1; + } while (n < max_depth - 1 && stack) { - result[n] = (void*)stack->kind; - result[n + 1] = (void*)stack->value; + result[n] = stack->kind; + result[n + 1] = stack->value; stack = stack->next; n += 2; } @@ -205,7 +214,7 @@ } #endif -static void *get_current_thread_id(void) +static intptr_t get_current_thread_id(void) { /* xxx This function is a hack on two fronts: @@ -219,7 +228,7 @@ An alternative would be to try to look if the information is available in the ucontext_t in the caller. */ - return (void *)pthread_self(); + return (intptr_t)pthread_self(); } @@ -247,7 +256,8 @@ st->marker = MARKER_STACKTRACE; st->count = 1; //st->stack[0] = GetPC((ucontext_t*)ucontext); - depth = get_stack_trace(st->stack, MAX_STACK_DEPTH-2, ucontext); + depth = get_stack_trace(st->stack, + MAX_STACK_DEPTH-2, GetPC((ucontext_t*)ucontext), ucontext); //depth++; // To account for pc value in stack[0]; st->depth = depth; st->stack[depth++] = get_current_thread_id(); @@ -402,46 +412,11 @@ static int close_profile(void) { - char buf[4096]; - ssize_t size; unsigned char marker = MARKER_TRAILER; if (_write_all(&marker, 1) < 0) return -1; -#ifdef __linux__ - // copy /proc/self/maps to the end of the profile file - int srcfd = open("/proc/self/maps", O_RDONLY); - if (srcfd < 0) - return -1; - - while ((size = read(srcfd, buf, sizeof buf)) > 0) { - if (_write_all(buf, size) < 0) { - close(srcfd); - return -1; - } - } - close(srcfd); -#else - // freebsd and mac -#if defined(__APPLE__) - sprintf(buf, "vmmap %d", getpid()); -#else - sprintf(buf, "procstat -v %d", getpid()); -#endif - FILE *srcf = popen(buf, "r"); - if (!srcf) - return -1; - - while ((size = fread(buf, 1, sizeof buf, srcf))) { - if (_write_all(buf, size) < 0) { - pclose(srcf); - return -1; - } - } - pclose(srcf); -#endif - /* don't close() the file descriptor from here */ profile_file = -1; return 0; diff --git a/rpython/rlib/rvmprof/src/vmprof_stack.h b/rpython/rlib/rvmprof/src/vmprof_stack.h --- a/rpython/rlib/rvmprof/src/vmprof_stack.h +++ b/rpython/rlib/rvmprof/src/vmprof_stack.h @@ -4,12 +4,13 @@ #define VMPROF_JITTED_TAG 3 #define VMPROF_JITTING_TAG 4 #define VMPROF_GC_TAG 5 +#define VMPROF_ASSEMBLER_TAG 6 // whatever we want here typedef struct vmprof_stack { struct vmprof_stack* next; - long value; - long kind; + intptr_t value; + intptr_t kind; } vmprof_stack; // the kind is WORD so we consume exactly 3 WORDs and we don't have From pypy.commits at gmail.com Sun Jan 10 10:56:41 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 10 Jan 2016 07:56:41 -0800 (PST) Subject: [pypy-commit] pypy py3.3: hg merge py3k (+ fixes) Message-ID: <56927f39.42cbc20a.18060.7114@mx.google.com> Author: Manuel Jacob Branch: py3.3 Changeset: r81652:88aafcb7c318 Date: 2016-01-10 16:55 +0100 http://bitbucket.org/pypy/pypy/changeset/88aafcb7c318/ Log: hg merge py3k (+ fixes) diff too long, truncating to 2000 out of 122816 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +__pycache__/ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -15,3 +15,5 @@ e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0 +f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 +850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -56,14 +56,15 @@ Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen + Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen Lukas Diekmann Sven Hager Anders Lehmann + Remi Meier Aurelien Campeas - Remi Meier Niklaus Haldimann Camillo Bruni Laura Creighton @@ -87,7 +88,6 @@ Ludovic Aubry Jacob Hallen Jason Creighton - Richard Plangger Alex Martelli Michal Bendowski stian @@ -168,7 +168,6 @@ Michael Twomey Lucian Branescu Mihaila Yichao Yu - Anton Gulenko Gabriel Lavoie Olivier Dormond Jared Grubb @@ -201,9 +200,12 @@ Alex Perry Vincent Legoll Alan McIntyre + Spenser Bauman Alexander Sedov Attila Gobi Christopher Pope + Devin Jeanpierre + Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg @@ -215,6 +217,7 @@ Carl Meyer Karl Ramm Pieter Zieschang + Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -234,6 +237,7 @@ Lutz Paelike Lucio Torre Lars Wassermann + Philipp Rustemeuer Henrik Vendelbo Dan Buch Miguel de Val Borro @@ -244,14 +248,17 @@ Martin Blais Lene Wagner Tomo Cocoa + Kim Jin Su Toni Mattis Lucas Stadler Julian Berman + Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez William Leslie Bobby Impollonia + Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara @@ -282,6 +289,7 @@ shoma hosaka Daniel Neuhäuser Ben Mather + Niclas Olofsson halgari Boglarka Vezer Chris Pressey @@ -308,13 +316,16 @@ Stefan Marr jiaaro Mads Kiilerich + Richard Lancaster opassembler.py Antony Lee + Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer Even Wiik Thomassen jbs squeaky + Zearin soareschen Kurt Griffiths Mike Bayer @@ -326,6 +337,7 @@ Anna Ravencroft Andrey Churin Dan Crosta + Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz @@ -352,8 +364,7 @@ Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files in the 'lib-python/2.7' directory are all copyrighted by the Python Software Foundation and licensed -under the Python Software License of which you can find a copy here: -http://www.python.org/doc/Copyright.html +under the terms that you can find here: https://docs.python.org/2/license.html License for 'pypy/module/unicodedata/' ====================================== @@ -430,12 +441,12 @@ _gdbm module, provided in the file lib_pypy/_gdbm.py, is redistributed under the terms of the GPL license as well. -License for 'pypy/module/_vmprof/src' +License for 'rpython/rlib/rvmprof/src' -------------------------------------- The code is based on gperftools. You may see a copy of the License for it at - https://code.google.com/p/gperftools/source/browse/COPYING + https://github.com/gperftools/gperftools/blob/master/COPYING License for 'liblzma and 'lzmaffi' ---------------------------------- diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/3/_abcoll.py b/lib-python/3/_abcoll.py new file mode 100644 --- /dev/null +++ b/lib-python/3/_abcoll.py @@ -0,0 +1,623 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +DON'T USE THIS MODULE DIRECTLY! The classes here should be imported +via collections; they are defined here only to alleviate certain +bootstrapping issues. Unit tests are in test_collections. +""" + +from abc import ABCMeta, abstractmethod +import sys + +__all__ = ["Hashable", "Iterable", "Iterator", + "Sized", "Container", "Callable", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + "ByteString", + ] + + +### collection related types which are not exposed through builtin ### +## iterators ## +bytes_iterator = type(iter(b'')) +bytearray_iterator = type(iter(bytearray())) +#callable_iterator = ??? +dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +dict_proxy = type(type.__dict__) + + +### ONE-TRICK PONIES ### + +class Hashable(metaclass=ABCMeta): + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + for B in C.__mro__: + if "__hash__" in B.__dict__: + if B.__dict__["__hash__"]: + return True + break + return NotImplemented + + +class Iterable(metaclass=ABCMeta): + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + if any("__iter__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Iterator(Iterable): + + @abstractmethod + def __next__(self): + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + if (any("__next__" in B.__dict__ for B in C.__mro__) and + any("__iter__" in B.__dict__ for B in C.__mro__)): + return True + return NotImplemented + +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) +#Iterator.register(callable_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) + +class Sized(metaclass=ABCMeta): + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + if any("__len__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Container(metaclass=ABCMeta): + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + if any("__contains__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Callable(metaclass=ABCMeta): + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + if any("__call__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +### SETS ### + + +class Set(Sized, Iterable, Container): + + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return other.__lt__(self) + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + return other.__le__(self) + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + def __ne__(self, other): + return not (self == other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + def isdisjoint(self, other): + for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxsize + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + +Set.register(frozenset) + + +class MutableSet(Set): + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + +MutableSet.register(set) + + +### MAPPINGS ### + + +class Mapping(Sized, Iterable, Container): + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + return KeysView(self) + + def items(self): + return ItemsView(self) + + def values(self): + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + def __ne__(self, other): + return not (self == other) + + +class MappingView(Sized): + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + +class KeysView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + for key in self._mapping: + yield key + +KeysView.register(dict_keys) + + +class ItemsView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + +ItemsView.register(dict_items) + + +class ValuesView(MappingView): + + def __contains__(self, value): + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + +ValuesView.register(dict_values) + + +class MutableMapping(Mapping): + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + try: + key = next(iter(self)) + except StopIteration: + raise KeyError + value = self[key] + del self[key] + return key, value + + def clear(self): + try: + while True: + self.popitem() + except KeyError: + pass + + def update(*args, **kwds): + if len(args) > 2: + raise TypeError("update() takes at most 2 positional " + "arguments ({} given)".format(len(args))) + elif not args: + raise TypeError("update() takes at least 1 argument (0 given)") + self = args[0] + other = args[1] if len(args) >= 2 else () + + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + try: + return self[key] + except KeyError: + self[key] = default + return default + +MutableMapping.register(dict) + + +### SEQUENCES ### + + +class Sequence(Sized, Iterable, Container): + + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value): + for i, v in enumerate(self): + if v == value: + return i + raise ValueError + + def count(self, value): + return sum(1 for v in self if v == value) + +Sequence.register(tuple) +Sequence.register(str) +Sequence.register(range) + + +class ByteString(Sequence): + + """This unifies bytes and bytearray. + + XXX Should add all their methods. + """ + +ByteString.register(bytes) +ByteString.register(bytearray) + + +class MutableSequence(Sequence): + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + raise IndexError + + def append(self, value): + self.insert(len(self), value) + + def reverse(self): + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + for v in values: + self.append(v) + + def pop(self, index=-1): + v = self[index] + del self[index] + return v + + def remove(self, value): + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + +MutableSequence.register(list) +MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/lib-python/3/collections.py b/lib-python/3/collections.py new file mode 100644 --- /dev/null +++ b/lib-python/3/collections.py @@ -0,0 +1,1091 @@ +__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', + 'UserString', 'Counter', 'OrderedDict'] +# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py. +# They should however be considered an integral part of collections.py. +from _abcoll import * +import _abcoll +__all__ += _abcoll.__all__ + +from _collections import deque, defaultdict +from operator import itemgetter as _itemgetter +from keyword import iskeyword as _iskeyword +import sys as _sys +import heapq as _heapq +from weakref import proxy as _proxy +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap +from reprlib import recursive_repr as _recursive_repr + +################################################################################ +### OrderedDict +################################################################################ + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last==False). + + Raises KeyError if the element does not exist. + When last=True, acts like a fast version of self[key]=self.pop(key). + + ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + last.next = root.prev = link + else: + first = root.next + link.prev = root + link.next = first + root.next = first.prev = link + + def __sizeof__(self): + sizeof = _sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + update = __update = MutableMapping.update + keys = MutableMapping.keys + values = MutableMapping.values + items = MutableMapping.items + __ne__ = MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + @_recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and \ + all(p==q for p, q in zip(self.items(), other.items())) + return dict.__eq__(self, other) + + +################################################################################ +### namedtuple +################################################################################ + +_class_template = '''\ +from builtins import property as _property, tuple as _tuple +from operator import itemgetter as _itemgetter +from collections import OrderedDict + +class {typename}(tuple): + '{typename}({arg_list})' + + __slots__ = () + + _fields = {field_names!r} + + def __new__(_cls, {arg_list}): + 'Create new instance of {typename}({arg_list})' + return _tuple.__new__(_cls, ({arg_list})) + + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new {typename} object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != {num_fields:d}: + raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result)) + return result + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + '({repr_fmt})' % self + + def _asdict(self): + 'Return a new OrderedDict which maps field names to their values' + return OrderedDict(zip(self._fields, self)) + + __dict__ = property(_asdict) + + def _replace(_self, **kwds): + 'Return a new {typename} object replacing specified fields with new values' + result = _self._make(map(kwds.pop, {field_names!r}, _self)) + if kwds: + raise ValueError('Got unexpected field names: %r' % list(kwds)) + return result + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return tuple(self) + + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + return None + +{field_defs} +''' + +_repr_template = '{name}=%r' + +_field_template = '''\ + {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}') +''' + +def namedtuple(typename, field_names, verbose=False, rename=False): + """Returns a new subclass of tuple with named fields. + + >>> Point = namedtuple('Point', ['x', 'y']) + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessable by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Parse and validate the field names. Validation serves two purposes, + # generating informative error messages and preventing template injection attacks. + if isinstance(field_names, str): + field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas + field_names = list(map(str, field_names)) + if rename: + seen = set() + for index, name in enumerate(field_names): + if (not all(c.isalnum() or c=='_' for c in name) + or _iskeyword(name) + or not name + or name[0].isdigit() + or name.startswith('_') + or name in seen): + field_names[index] = '_%d' % index + seen.add(name) + for name in [typename] + field_names: + if not all(c.isalnum() or c=='_' for c in name): + raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name) + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a keyword: %r' % name) + if name[0].isdigit(): + raise ValueError('Type names and field names cannot start with a number: %r' % name) + seen = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: %r' % name) + if name in seen: + raise ValueError('Encountered duplicate field name: %r' % name) + seen.add(name) + + # Fill-in the class template + class_definition = _class_template.format( + typename = typename, + field_names = tuple(field_names), + num_fields = len(field_names), + arg_list = repr(tuple(field_names)).replace("'", "")[1:-1], + repr_fmt = ', '.join(_repr_template.format(name=name) for name in field_names), + field_defs = '\n'.join(_field_template.format(index=index, name=name) + for index, name in enumerate(field_names)) + ) + + # Execute the template string in a temporary namespace and + # support tracing utilities by setting a value for frame.f_globals['__name__'] + namespace = dict(__name__='namedtuple_%s' % typename) + try: + exec(class_definition, namespace) + except SyntaxError as e: + raise SyntaxError(e.msg + ':\n\n' + class_definition) + result = namespace[typename] + if verbose: + print(class_definition) + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in enviroments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return result + + +######################################################################## +### Counter +######################################################################## + +def _count_elements(mapping, iterable): + 'Tally elements from the iterable.' + mapping_get = mapping.get + for elem in iterable: + mapping[elem] = mapping_get(elem, 0) + 1 + +try: # Load C helper function if available + from _collections import _count_elements +except ImportError: + pass + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. + + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(self, iterable=None, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + super().__init__() + self.update(iterable, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. + + >>> Counter('abcdeabcdabcaba').most_common(3) + [('a', 5), ('b', 4), ('c', 3)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.items(), key=_itemgetter(1), reverse=True) + return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. + return _chain.from_iterable(_starmap(_repeat, self.items())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because setting v=1 + # means that no element can have a count greater than one. + raise NotImplementedError( + 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') + + def update(self, iterable=None, **kwds): + '''Like dict.update() but add counts instead of replacing them. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if iterable is not None: + if isinstance(iterable, Mapping): + if self: + self_get = self.get + for elem, count in iterable.items(): + self[elem] = count + self_get(elem, 0) + else: + super().update(iterable) # fast path when counter is empty + else: + _count_elements(self, iterable) + if kwds: + self.update(kwds) + + def subtract(self, iterable=None, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if iterable is not None: + self_get = self.get + if isinstance(iterable, Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' + return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' + if elem in self: + super().__delitem__(elem) + + def __repr__(self): + if not self: + return '%s()' % self.__class__.__name__ + try: + items = ', '.join(map('%r: %r'.__mod__, self.most_common())) + return '%s({%s})' % (self.__class__.__name__, items) + except TypeError: + # handle case where values are not orderable + return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + + def __add__(self, other): + '''Add counts from two counters. + + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. + + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + +######################################################################## +### ChainMap (helper for configparser) +######################################################################## + +class _ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + + +################################################################################ +### UserDict +################################################################################ + +class UserDict(MutableMapping): + + # Start by filling-out the abstract methods + def __init__(self, dict=None, **kwargs): + self.data = {} + if dict is not None: + self.update(dict) + if len(kwargs): + self.update(kwargs) + def __len__(self): return len(self.data) + def __getitem__(self, key): + if key in self.data: + return self.data[key] + if hasattr(self.__class__, "__missing__"): + return self.__class__.__missing__(self, key) + raise KeyError(key) + def __setitem__(self, key, item): self.data[key] = item + def __delitem__(self, key): del self.data[key] + def __iter__(self): + return iter(self.data) + + # Modify __contains__ to work correctly when __missing__ is present + def __contains__(self, key): + return key in self.data + + # Now, add the methods in dicts but not in MutableMapping + def __repr__(self): return repr(self.data) + def copy(self): + if self.__class__ is UserDict: + return UserDict(self.data.copy()) + import copy + data = self.data + try: + self.data = {} + c = copy.copy(self) + finally: + self.data = data + c.update(self) + return c + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + + +################################################################################ +### UserList +################################################################################ + +class UserList(MutableSequence): + """A more or less complete user-defined wrapper around list objects.""" + def __init__(self, initlist=None): + self.data = [] + if initlist is not None: + # XXX should this accept an arbitrary sequence? + if type(initlist) == type(self.data): + self.data[:] = initlist + elif isinstance(initlist, UserList): + self.data[:] = initlist.data[:] + else: + self.data = list(initlist) + def __repr__(self): return repr(self.data) + def __lt__(self, other): return self.data < self.__cast(other) + def __le__(self, other): return self.data <= self.__cast(other) + def __eq__(self, other): return self.data == self.__cast(other) + def __ne__(self, other): return self.data != self.__cast(other) + def __gt__(self, other): return self.data > self.__cast(other) + def __ge__(self, other): return self.data >= self.__cast(other) + def __cast(self, other): + return other.data if isinstance(other, UserList) else other + def __contains__(self, item): return item in self.data + def __len__(self): return len(self.data) + def __getitem__(self, i): return self.data[i] + def __setitem__(self, i, item): self.data[i] = item + def __delitem__(self, i): del self.data[i] + def __add__(self, other): + if isinstance(other, UserList): + return self.__class__(self.data + other.data) + elif isinstance(other, type(self.data)): + return self.__class__(self.data + other) + return self.__class__(self.data + list(other)) + def __radd__(self, other): + if isinstance(other, UserList): + return self.__class__(other.data + self.data) + elif isinstance(other, type(self.data)): + return self.__class__(other + self.data) + return self.__class__(list(other) + self.data) + def __iadd__(self, other): + if isinstance(other, UserList): + self.data += other.data + elif isinstance(other, type(self.data)): + self.data += other + else: + self.data += list(other) + return self + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __imul__(self, n): + self.data *= n + return self + def append(self, item): self.data.append(item) + def insert(self, i, item): self.data.insert(i, item) + def pop(self, i=-1): return self.data.pop(i) + def remove(self, item): self.data.remove(item) + def count(self, item): return self.data.count(item) + def index(self, item, *args): return self.data.index(item, *args) + def reverse(self): self.data.reverse() + def sort(self, *args, **kwds): self.data.sort(*args, **kwds) + def extend(self, other): + if isinstance(other, UserList): + self.data.extend(other.data) + else: + self.data.extend(other) + + + +################################################################################ +### UserString +################################################################################ + +class UserString(Sequence): + def __init__(self, seq): + if isinstance(seq, str): + self.data = seq + elif isinstance(seq, UserString): + self.data = seq.data[:] + else: + self.data = str(seq) + def __str__(self): return str(self.data) + def __repr__(self): return repr(self.data) + def __int__(self): return int(self.data) + def __float__(self): return float(self.data) + def __complex__(self): return complex(self.data) + def __hash__(self): return hash(self.data) + + def __eq__(self, string): + if isinstance(string, UserString): + return self.data == string.data + return self.data == string + def __ne__(self, string): + if isinstance(string, UserString): + return self.data != string.data + return self.data != string + def __lt__(self, string): + if isinstance(string, UserString): + return self.data < string.data + return self.data < string + def __le__(self, string): + if isinstance(string, UserString): + return self.data <= string.data + return self.data <= string + def __gt__(self, string): + if isinstance(string, UserString): + return self.data > string.data + return self.data > string + def __ge__(self, string): + if isinstance(string, UserString): + return self.data >= string.data + return self.data >= string + + def __contains__(self, char): + if isinstance(char, UserString): + char = char.data + return char in self.data + + def __len__(self): return len(self.data) + def __getitem__(self, index): return self.__class__(self.data[index]) + def __add__(self, other): + if isinstance(other, UserString): + return self.__class__(self.data + other.data) + elif isinstance(other, str): + return self.__class__(self.data + other) + return self.__class__(self.data + str(other)) + def __radd__(self, other): + if isinstance(other, str): + return self.__class__(other + self.data) + return self.__class__(str(other) + self.data) + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __mod__(self, args): + return self.__class__(self.data % args) + + # the following methods are defined in alphabetical order: + def capitalize(self): return self.__class__(self.data.capitalize()) + def center(self, width, *args): + return self.__class__(self.data.center(width, *args)) + def count(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.count(sub, start, end) + def encode(self, encoding=None, errors=None): # XXX improve this? + if encoding: + if errors: + return self.__class__(self.data.encode(encoding, errors)) + return self.__class__(self.data.encode(encoding)) + return self.__class__(self.data.encode()) + def endswith(self, suffix, start=0, end=_sys.maxsize): + return self.data.endswith(suffix, start, end) + def expandtabs(self, tabsize=8): + return self.__class__(self.data.expandtabs(tabsize)) + def find(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.find(sub, start, end) + def format(self, *args, **kwds): + return self.data.format(*args, **kwds) + def index(self, sub, start=0, end=_sys.maxsize): + return self.data.index(sub, start, end) + def isalpha(self): return self.data.isalpha() + def isalnum(self): return self.data.isalnum() + def isdecimal(self): return self.data.isdecimal() + def isdigit(self): return self.data.isdigit() + def isidentifier(self): return self.data.isidentifier() + def islower(self): return self.data.islower() + def isnumeric(self): return self.data.isnumeric() + def isspace(self): return self.data.isspace() + def istitle(self): return self.data.istitle() + def isupper(self): return self.data.isupper() + def join(self, seq): return self.data.join(seq) + def ljust(self, width, *args): + return self.__class__(self.data.ljust(width, *args)) + def lower(self): return self.__class__(self.data.lower()) + def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) + def partition(self, sep): + return self.data.partition(sep) + def replace(self, old, new, maxsplit=-1): + if isinstance(old, UserString): + old = old.data + if isinstance(new, UserString): + new = new.data + return self.__class__(self.data.replace(old, new, maxsplit)) + def rfind(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.rfind(sub, start, end) + def rindex(self, sub, start=0, end=_sys.maxsize): + return self.data.rindex(sub, start, end) + def rjust(self, width, *args): + return self.__class__(self.data.rjust(width, *args)) + def rpartition(self, sep): + return self.data.rpartition(sep) + def rstrip(self, chars=None): + return self.__class__(self.data.rstrip(chars)) + def split(self, sep=None, maxsplit=-1): + return self.data.split(sep, maxsplit) + def rsplit(self, sep=None, maxsplit=-1): + return self.data.rsplit(sep, maxsplit) + def splitlines(self, keepends=0): return self.data.splitlines(keepends) + def startswith(self, prefix, start=0, end=_sys.maxsize): + return self.data.startswith(prefix, start, end) + def strip(self, chars=None): return self.__class__(self.data.strip(chars)) + def swapcase(self): return self.__class__(self.data.swapcase()) + def title(self): return self.__class__(self.data.title()) + def translate(self, *args): + return self.__class__(self.data.translate(*args)) + def upper(self): return self.__class__(self.data.upper()) + def zfill(self, width): return self.__class__(self.data.zfill(width)) + + + +################################################################################ +### Simple tests +################################################################################ + +if __name__ == '__main__': + # verify that instances can be pickled + from pickle import loads, dumps + Point = namedtuple('Point', 'x, y', True) + p = Point(x=10, y=20) + assert p == loads(dumps(p)) + + # test and demonstrate ability to override methods + class Point(namedtuple('Point', 'x y')): + __slots__ = () + @property + def hypot(self): + return (self.x ** 2 + self.y ** 2) ** 0.5 + def __str__(self): + return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot) + + for p in Point(3, 4), Point(14, 5/7.): + print (p) + + class Point(namedtuple('Point', 'x y')): + 'Point class with optimized _make() and _replace() without error-checking' + __slots__ = () + _make = classmethod(tuple.__new__) + def _replace(self, _map=map, **kwds): + return self._make(_map(kwds.get, ('x', 'y'), self)) + + print(Point(11, 22)._replace(x=100)) + + Point3D = namedtuple('Point3D', Point._fields + ('z',)) + print(Point3D.__doc__) + + import doctest + TestResults = namedtuple('TestResults', 'failed attempted') + print(TestResults(*doctest.testmod())) diff --git a/lib-python/3/curses/wrapper.py b/lib-python/3/curses/wrapper.py new file mode 100644 --- /dev/null +++ b/lib-python/3/curses/wrapper.py @@ -0,0 +1,50 @@ +"""curses.wrapper + +Contains one function, wrapper(), which runs another function which +should be the rest of your curses-based application. If the +application raises an exception, wrapper() will restore the terminal +to a sane state so you can read the resulting traceback. + +""" + +import curses + +def wrapper(func, *args, **kwds): + """Wrapper function that initializes curses and calls another function, + restoring normal keyboard/screen behavior on error. + The callable object 'func' is then passed the main window 'stdscr' + as its first argument, followed by any other arguments passed to + wrapper(). + """ + + try: + # Initialize curses + stdscr = curses.initscr() + + # Turn off echoing of keys, and enter cbreak mode, + # where no buffering is performed on keyboard input + curses.noecho() + curses.cbreak() + + # In keypad mode, escape sequences for special keys + # (like the cursor keys) will be interpreted and + # a special value like curses.KEY_LEFT will be returned + stdscr.keypad(1) + + # Start color, too. Harmless if the terminal doesn't have + # color; user can test with has_color() later on. The try/catch + # works around a minor bit of over-conscientiousness in the curses + # module -- the error return from C start_color() is ignorable. + try: + curses.start_color() + except: + pass + + return func(stdscr, *args, **kwds) + finally: + # Set everything back to normal + if 'stdscr' in locals(): + stdscr.keypad(0) + curses.echo() + curses.nocbreak() + curses.endwin() diff --git a/lib-python/3/email/test/__init__.py b/lib-python/3/email/test/__init__.py new file mode 100644 diff --git a/lib-python/3/email/test/data/PyBanner048.gif b/lib-python/3/email/test/data/PyBanner048.gif new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..1a5c87f647fbf33e5b46103119c9fd42afbe9e5d GIT binary patch [cut] diff --git a/lib-python/3/email/test/data/audiotest.au b/lib-python/3/email/test/data/audiotest.au new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f76b0501b8c61b4fabbb3715b69a9434a42469cb GIT binary patch [cut] diff --git a/lib-python/3/email/test/data/msg_01.txt b/lib-python/3/email/test/data/msg_01.txt new file mode 100644 --- /dev/null +++ b/lib-python/3/email/test/data/msg_01.txt @@ -0,0 +1,19 @@ +Return-Path: +Delivered-To: bbb at zzz.org +Received: by mail.zzz.org (Postfix, from userid 889) + id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT) +MIME-Version: 1.0 From pypy.commits at gmail.com Sun Jan 10 14:32:13 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 10 Jan 2016 11:32:13 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fixes Message-ID: <5692b1bd.25fac20a.12430.5370@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81653:451e0179aa73 Date: 2016-01-10 21:31 +0200 http://bitbucket.org/pypy/pypy/changeset/451e0179aa73/ Log: fixes diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -1,13 +1,20 @@ -import os +import os, py from rpython.jit.backend.test.support import CCompiledMixin from rpython.rlib.jit import JitDriver from rpython.tool.udir import udir +from rpython.translator.translator import TranslationContext from rpython.jit.backend.detect_cpu import getcpuclass class CompiledVmprofTest(CCompiledMixin): CPUClass = getcpuclass() + def _get_TranslationContext(self): + t = TranslationContext() + t.config.translation.gc = 'incminimark' + t.config.translation.list_comprehension_operations = True + return t + def test_vmprof(self): from rpython.rlib import rvmprof @@ -20,7 +27,8 @@ except rvmprof.VMProfPlatformUnsupported, e: py.test.skip(str(e)) - driver = JitDriver(greens = ['code'], reds = ['i', 's', 'num']) + driver = JitDriver(greens = ['code'], reds = ['i', 's', 'num'], + is_recursive=True) @rvmprof.vmprof_execute_code("xcode13", lambda code, num: code) def main(code, num): @@ -55,6 +63,9 @@ from vmprof import read_profile tmpfile = str(udir.join('test_rvmprof')) stats = read_profile(tmpfile) + t = stats.get_tree() + import pdb + pdb.set_trace() self.meta_interp(f, [100000000]) try: diff --git a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h +++ b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h @@ -26,7 +26,7 @@ result[n] = VMPROF_ASSEMBLER_TAG; result[n + 1] = start_addr; n += 2; - start = n + 2; + start = n; while (n < max_depth) { id = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); if (id == -1) diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -155,7 +155,7 @@ // the bottom part is jitted, means we can fill up the first part // from the JIT n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); - bottom_jitted = 1; + stack = stack->next; // skip the first item as it contains garbage } while (n < max_depth - 1 && stack) { result[n] = stack->kind; diff --git a/rpython/rlib/rvmprof/src/vmprof_stack.h b/rpython/rlib/rvmprof/src/vmprof_stack.h --- a/rpython/rlib/rvmprof/src/vmprof_stack.h +++ b/rpython/rlib/rvmprof/src/vmprof_stack.h @@ -1,3 +1,5 @@ + +#include #define VMPROF_CODE_TAG 1 #define VMPROF_BLACKHOLE_TAG 2 From pypy.commits at gmail.com Sun Jan 10 14:34:26 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 10 Jan 2016 11:34:26 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: kill #if 0ed code Message-ID: <5692b242.a3f6c20a.12549.ffffd681@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81654:0933fc0cc566 Date: 2016-01-10 21:33 +0200 http://bitbucket.org/pypy/pypy/changeset/0933fc0cc566/ Log: kill #if 0ed code diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -166,54 +166,6 @@ return n; } -#if 0 -static int xxx_get_stack_trace(void** result, int max_depth, ucontext_t *ucontext) -{ - void *ip; - int n = 0; - unw_cursor_t cursor; -#ifdef __APPLE__ - unw_context_t uc; - unw_getcontext(&uc); -#else - unw_context_t uc = *ucontext; -#endif - - int ret = unw_init_local(&cursor, &uc); - assert(ret >= 0); - (void)ret; - - while (n < max_depth) { - if (unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip) < 0) { - break; - } - - unw_proc_info_t pip; - unw_get_proc_info(&cursor, &pip); - - /* if n==0, it means that the signal handler interrupted us while we - were in the trampoline, so we are not executing (yet) the real main - loop function; just skip it */ - if (VMPROF_ADDR_OF_TRAMPOLINE((void*)pip.start_ip) && n > 0) { - // found main loop stack frame - void* sp; - unw_get_reg(&cursor, UNW_REG_SP, (unw_word_t *) &sp); - if (mainloop_get_virtual_ip) - ip = mainloop_get_virtual_ip((char *)sp); - else - ip = *(void **)sp; - } - - int first_run = (n == 0); - result[n++] = ip; - n = vmprof_write_header_for_jit_addr(result, n, ip, max_depth); - if (vmprof_unw_step(&cursor, first_run) <= 0) - break; - } - return n; -} -#endif - static intptr_t get_current_thread_id(void) { /* xxx This function is a hack on two fronts: From pypy.commits at gmail.com Sun Jan 10 14:35:27 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 10 Jan 2016 11:35:27 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: kill more unused code Message-ID: <5692b27f.c9321c0a.e6ef0.fffffda7@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81655:f5f00966f723 Date: 2016-01-10 21:34 +0200 http://bitbucket.org/pypy/pypy/changeset/f5f00966f723/ Log: kill more unused code diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -38,15 +38,6 @@ /************************************************************/ -// functions copied from libunwind using dlopen - -#ifndef __APPLE__ // should be linux only probably -static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; -static int (*unw_step)(unw_cursor_t*) = NULL; -static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; -static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; -#endif - static int profile_file = -1; static long prepare_interval_usec; static struct profbuf_s *volatile current_codes; From pypy.commits at gmail.com Sun Jan 10 15:25:31 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 10 Jan 2016 12:25:31 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fix translation Message-ID: <5692be3b.03abc20a.58f46.ffff8384@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81656:f9c88ae567fd Date: 2016-01-10 22:24 +0200 http://bitbucket.org/pypy/pypy/changeset/f9c88ae567fd/ Log: fix translation diff --git a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h +++ b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h @@ -8,6 +8,7 @@ static long vmprof_write_header_for_jit_addr(intptr_t *result, long n, intptr_t ip, int max_depth) { +#ifdef PYPY_JIT_CODEMAP void *codemap; long current_pos = 0; intptr_t id; @@ -44,5 +45,6 @@ result[n - k - 1] = tmp; k++; } +#endif return n; } diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -142,12 +142,14 @@ intptr_t addr = 0; int bottom_jitted = 0; // check if the pc is in JIT +#ifdef PYPY_JIT_CODEMAP if (pypy_find_codemap_at_addr((intptr_t)pc, &addr)) { // the bottom part is jitted, means we can fill up the first part // from the JIT n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); stack = stack->next; // skip the first item as it contains garbage } +#endif while (n < max_depth - 1 && stack) { result[n] = stack->kind; result[n + 1] = stack->value; From pypy.commits at gmail.com Sun Jan 10 15:47:26 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 10 Jan 2016 12:47:26 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: don't release GIL here Message-ID: <5692c35e.c8921c0a.3196.4c3c@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81657:a2e3daa1386a Date: 2016-01-10 22:46 +0200 http://bitbucket.org/pypy/pypy/changeset/a2e3daa1386a/ Log: don't release GIL here diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -58,7 +58,7 @@ _nowrapper=True) vmprof_address_of_global_stack = rffi.llexternal( "vmprof_address_of_global_stack", [], rffi.CArrayPtr(lltype.Signed), - compilation_info=eci) + compilation_info=eci, _nowrapper=True) return CInterface(locals()) From pypy.commits at gmail.com Mon Jan 11 01:11:57 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 10 Jan 2016 22:11:57 -0800 (PST) Subject: [pypy-commit] pypy default: Remove handling of DelayedPointer in all analyze_external_call() methods. Message-ID: <569347ad.e16ec20a.f0264.62b3@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81658:2dc5b1d863de Date: 2016-01-10 19:05 +0100 http://bitbucket.org/pypy/pypy/changeset/2dc5b1d863de/ Log: Remove handling of DelayedPointer in all analyze_external_call() methods. Delayed pointers to external functions don't make sense. The method analyze_external_call() was wrongly called because get_graph() returns None when encountering a delayed pointer and analyze() assumed it means that the function is an external function. A proper fix (with test) will be committed as a follow-up. diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -335,7 +335,7 @@ funcobj = op.args[0].value._obj if funcobj.random_effects_on_gcobjs: return True - except (AttributeError, lltype.DelayedPointer): + except AttributeError: return True # better safe than sorry return super(RandomEffectsAnalyzer, self).analyze_external_call( op, seen) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -36,10 +36,7 @@ return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except lltype.DelayedPointer: - return True + funcobj = op.args[0].value._obj if getattr(funcobj, 'random_effects_on_gcobjs', False): return True return graphanalyze.BoolGraphAnalyzer.analyze_external_call(self, op, diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -56,10 +56,7 @@ return self.bottom_result() def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except DelayedPointer: - return self.top_result() + funcobj = op.args[0].value._obj result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper From pypy.commits at gmail.com Mon Jan 11 01:12:00 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 10 Jan 2016 22:12:00 -0800 (PST) Subject: [pypy-commit] pypy default: Change this part of the test to use an actual external function. Message-ID: <569347b0.41dfc20a.752aa.ffffc50b@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81659:3f19265058a5 Date: 2016-01-11 01:44 +0100 http://bitbucket.org/pypy/pypy/changeset/3f19265058a5/ Log: Change this part of the test to use an actual external function. This is required for a follow-up change which uses a more precise way to recognize external functions. diff --git a/rpython/translator/backendopt/test/test_canraise.py b/rpython/translator/backendopt/test/test_canraise.py --- a/rpython/translator/backendopt/test/test_canraise.py +++ b/rpython/translator/backendopt/test/test_canraise.py @@ -204,8 +204,7 @@ result = ra.can_raise(fgraph.startblock.operations[0]) assert not result - z = lltype.functionptr(lltype.FuncType([lltype.Signed], lltype.Signed), - 'foobar') + z = llexternal('z', [lltype.Signed], lltype.Signed) def g(x): return z(x) t, ra = self.translate(g, [int]) From pypy.commits at gmail.com Mon Jan 11 01:12:01 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 10 Jan 2016 22:12:01 -0800 (PST) Subject: [pypy-commit] pypy default: Change GraphAnalyzer to use a more precise way to recognize external functions. Message-ID: <569347b1.6650c20a.a11d6.ffffb08f@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81660:6c748497435c Date: 2016-01-11 01:50 +0100 http://bitbucket.org/pypy/pypy/changeset/6c748497435c/ Log: Change GraphAnalyzer to use a more precise way to recognize external functions. diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -77,12 +77,14 @@ def analyze(self, op, seen=None, graphinfo=None): if op.opname == "direct_call": - graph = get_graph(op.args[0], self.translator) - if graph is None: + funcobj = op.args[0].value._obj + if getattr(funcobj, 'external', None) is not None: x = self.analyze_external_call(op, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x + graph = get_graph(op.args[0], self.translator) + assert graph is not None x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) From pypy.commits at gmail.com Mon Jan 11 01:12:03 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 10 Jan 2016 22:12:03 -0800 (PST) Subject: [pypy-commit] pypy default: Let GraphAnalyzer return a conservative result instead of crashing if it encounters a call of a delayed pointer. Message-ID: <569347b3.2467c20a.aad21.ffff8ed9@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81661:150c147032ee Date: 2016-01-11 06:11 +0100 http://bitbucket.org/pypy/pypy/changeset/150c147032ee/ Log: Let GraphAnalyzer return a conservative result instead of crashing if it encounters a call of a delayed pointer. diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -77,7 +77,10 @@ def analyze(self, op, seen=None, graphinfo=None): if op.opname == "direct_call": - funcobj = op.args[0].value._obj + try: + funcobj = op.args[0].value._obj + except DelayedPointer: + return self.top_result() if getattr(funcobj, 'external', None) is not None: x = self.analyze_external_call(op, seen) if self.verbose and x: diff --git a/rpython/translator/backendopt/test/test_graphanalyze.py b/rpython/translator/backendopt/test/test_graphanalyze.py --- a/rpython/translator/backendopt/test/test_graphanalyze.py +++ b/rpython/translator/backendopt/test/test_graphanalyze.py @@ -1,7 +1,7 @@ import random from rpython.tool.algo.unionfind import UnionFind -from rpython.translator.backendopt.graphanalyze import Dependency -from rpython.translator.backendopt.graphanalyze import DependencyTracker +from rpython.translator.backendopt.graphanalyze import (Dependency, + DependencyTracker, BoolGraphAnalyzer) class FakeGraphAnalyzer: @@ -49,3 +49,19 @@ method1 = rectrack(n, tracker) method2 = expected(n) assert method1 == method2 + + +def test_delayed_fnptr(): + from rpython.flowspace.model import SpaceOperation + from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator + from rpython.translator.translator import TranslationContext + t = TranslationContext() + t.buildannotator() + t.buildrtyper() + annhelper = MixLevelHelperAnnotator(t.rtyper) + def f(): + pass + c_f = annhelper.constfunc(f, [], None) + op = SpaceOperation('direct_call', [c_f], None) + analyzer = BoolGraphAnalyzer(t) + assert analyzer.analyze(op) From pypy.commits at gmail.com Mon Jan 11 01:12:05 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 10 Jan 2016 22:12:05 -0800 (PST) Subject: [pypy-commit] pypy default: Let GraphAnalyzer's analyze_external_call() method take a function object instead of the call operation. Message-ID: <569347b5.42cbc20a.18060.4584@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81662:dd0391f9a590 Date: 2016-01-11 06:16 +0100 http://bitbucket.org/pypy/pypy/changeset/dd0391f9a590/ Log: Let GraphAnalyzer's analyze_external_call() method take a function object instead of the call operation. diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -330,15 +330,14 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_external_call(self, op, seen=None): + def analyze_external_call(self, funcobj, seen=None): try: - funcobj = op.args[0].value._obj if funcobj.random_effects_on_gcobjs: return True except AttributeError: return True # better safe than sorry return super(RandomEffectsAnalyzer, self).analyze_external_call( - op, seen) + funcobj, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -35,12 +35,11 @@ return True return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) - def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj + def analyze_external_call(self, funcobj, seen=None): if getattr(funcobj, 'random_effects_on_gcobjs', False): return True - return graphanalyze.BoolGraphAnalyzer.analyze_external_call(self, op, - seen) + return graphanalyze.BoolGraphAnalyzer.analyze_external_call( + self, funcobj, seen) def analyze_simple_operation(self, op, graphinfo): if op.opname in ('malloc', 'malloc_varsize'): flags = op.args[1].value diff --git a/rpython/translator/backendopt/canraise.py b/rpython/translator/backendopt/canraise.py --- a/rpython/translator/backendopt/canraise.py +++ b/rpython/translator/backendopt/canraise.py @@ -22,8 +22,7 @@ log.WARNING("Unknown operation: %s" % op.opname) return True - def analyze_external_call(self, op, seen=None): - fnobj = op.args[0].value._obj + def analyze_external_call(self, fnobj, seen=None): return getattr(fnobj, 'canraise', True) analyze_exceptblock = None # don't call this diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -55,8 +55,7 @@ def analyze_startblock(self, block, seen=None): return self.bottom_result() - def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj + def analyze_external_call(self, funcobj, seen=None): result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper @@ -82,7 +81,7 @@ except DelayedPointer: return self.top_result() if getattr(funcobj, 'external', None) is not None: - x = self.analyze_external_call(op, seen) + x = self.analyze_external_call(funcobj, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x From pypy.commits at gmail.com Mon Jan 11 04:13:30 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 11 Jan 2016 01:13:30 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: missing fixes for pypy Message-ID: <5693723a.ccaa1c0a.51e03.314e@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81663:6d7759d0694a Date: 2016-01-11 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/6d7759d0694a/ Log: missing fixes for pypy diff --git a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h +++ b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h @@ -24,9 +24,8 @@ // modify the last entry to point to start address and not the random one // in the middle - result[n] = VMPROF_ASSEMBLER_TAG; - result[n + 1] = start_addr; - n += 2; + result[n++] = VMPROF_ASSEMBLER_TAG; + result[n++] = start_addr; start = n; while (n < max_depth) { id = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -151,10 +151,19 @@ } #endif while (n < max_depth - 1 && stack) { - result[n] = stack->kind; - result[n + 1] = stack->value; - stack = stack->next; - n += 2; + if (stack->kind == VMPROF_CODE_TAG) { + result[n] = stack->kind; + result[n + 1] = stack->value; + stack = stack->next; + n += 2; + } +#ifdef PYPY_JIT_CODEMAP + if (stack->kind == VMPROF_JITTED_TAG) { + pc = stack->value; + n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); + stack = stack->next; + } +#endif } return n; } From pypy.commits at gmail.com Mon Jan 11 04:25:53 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 11 Jan 2016 01:25:53 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: ups a better way Message-ID: <56937521.520e1c0a.9f3f7.ffffce5b@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81664:94707b484f65 Date: 2016-01-11 11:24 +0200 http://bitbucket.org/pypy/pypy/changeset/94707b484f65/ Log: ups a better way diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -159,7 +159,7 @@ } #ifdef PYPY_JIT_CODEMAP if (stack->kind == VMPROF_JITTED_TAG) { - pc = stack->value; + pc = (intptr_t*)(stack->value - sizeof(intptr_t))[0]; n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); stack = stack->next; } From pypy.commits at gmail.com Mon Jan 11 04:40:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 01:40:31 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: adapted values unpacked from the gcmap for the s390x case Message-ID: <5693788f.c8b3c20a.e4b97.2031@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81665:c7594062a728 Date: 2016-01-11 10:39 +0100 http://bitbucket.org/pypy/pypy/changeset/c7594062a728/ Log: adapted values unpacked from the gcmap for the s390x case diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -92,7 +92,7 @@ elif self.cpu.backend_name.startswith('ppc64'): assert nos == [0, 1, 33] elif self.cpu.backend_name.startswith('zarch'): - assert nos == [0, 1, 35] + assert nos == [2, 3, 35] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] @@ -647,11 +647,12 @@ gcmap = unpack_gcmap(frame) if self.cpu.backend_name.startswith('ppc64'): assert gcmap == [30, 31, 32] + elif self.cpu.backend_name.startswith('zarch'): + assert gcmap == [32, 33, 34] elif self.cpu.IS_64_BIT: assert gcmap == [28, 29, 30] elif self.cpu.backend_name.startswith('arm'): assert gcmap == [44, 45, 46] - pass else: assert gcmap == [22, 23, 24] for item, s in zip(gcmap, new_items): From pypy.commits at gmail.com Mon Jan 11 04:46:05 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 11 Jan 2016 01:46:05 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: I'll never get it right Message-ID: <569379dd.ea5ec20a.cd36.ffff8191@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81666:9ca1bce08a8b Date: 2016-01-11 11:44 +0200 http://bitbucket.org/pypy/pypy/changeset/9ca1bce08a8b/ Log: I'll never get it right diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -159,7 +159,7 @@ } #ifdef PYPY_JIT_CODEMAP if (stack->kind == VMPROF_JITTED_TAG) { - pc = (intptr_t*)(stack->value - sizeof(intptr_t))[0]; + pc = ((intptr_t*)(stack->value - sizeof(intptr_t)))[0]; n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); stack = stack->next; } From pypy.commits at gmail.com Mon Jan 11 05:06:16 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 11 Jan 2016 02:06:16 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: ups Message-ID: <56937e98.02371c0a.a53b0.7a3e@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81667:65638eea1b89 Date: 2016-01-11 12:05 +0200 http://bitbucket.org/pypy/pypy/changeset/65638eea1b89/ Log: ups diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -158,7 +158,7 @@ n += 2; } #ifdef PYPY_JIT_CODEMAP - if (stack->kind == VMPROF_JITTED_TAG) { + else if (stack->kind == VMPROF_JITTED_TAG) { pc = ((intptr_t*)(stack->value - sizeof(intptr_t)))[0]; n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); stack = stack->next; From pypy.commits at gmail.com Mon Jan 11 05:30:19 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 02:30:19 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: translation changes to the instruction builder, this was up to now quite dynamic Message-ID: <5693843b.cb941c0a.4fbee.4854@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81668:dab47ded5b1d Date: 2016-01-11 11:29 +0100 http://bitbucket.org/pypy/pypy/changeset/dab47ded5b1d/ Log: translation changes to the instruction builder, this was up to now quite dynamic diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -512,7 +512,6 @@ # registers). mc = InstrBuilder() # - mc.trap() # mc.STG(r.r14, l.addr(14*WORD, r.SP)) # Do the call mc.push_std_frame() @@ -526,7 +525,7 @@ mc.LG(r.SCRATCH, l.addr(0, r.SCRATCH)) # if this comparison is true, then everything is ok, # else we have an exception - mc.cmp_op(r.SCRATCH, 0, imm=True) + mc.cmp_op(r.SCRATCH, l.imm(0), imm=True) # # So we return to our caller, conditionally if "EQ" # mc.LG(r.r14, l.addr(14*WORD, r.SP)) diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -436,18 +436,55 @@ return encode_rie_c def build_unpack_func(mnemonic, func): - def function(self, *args): - newargs = [None] * len(func._arguments_) - for i,arg in enumerate(unrolling_iterable(func._arguments_)): - if arg == '-': - newargs[i] = 0 - elif arg == 'r' or arg == 'r/m' or arg == 'f' or arg == 'eo': - newargs[i] = args[i].value - elif arg.startswith('i') or arg.startswith('u') or arg.startswith('h'): - newargs[i] = args[i].value - else: - newargs[i] = args[i] - return func(self, *newargs) + @always_inline + def unpack_arg(arg, argtype): + if argtype == '-': + return 0 + elif argtype == 'r' or argtype == 'r/m' or \ + argtype == 'f' or argtype == 'eo': + return arg.value + elif argtype.startswith('i') or argtype.startswith('u') or argtype.startswith('h'): + return arg.value + else: + return arg + unpack_arg._annspecialcase_ = 'specialize:arg(1)' + argtypes = func._arguments_ + at = argtypes[0] if len(argtypes) >= 1 else '-' + bt = argtypes[1] if len(argtypes) >= 2 else '-' + ct = argtypes[2] if len(argtypes) >= 3 else '-' + dt = argtypes[3] if len(argtypes) >= 4 else '-' + def function0(self): + return func(self) + def function1(self, a): + e = unpack_arg(a, at) + return func(self, e) + def function2(self, a, b): + e = unpack_arg(a, at) + f = unpack_arg(b, bt) + return func(self, e, f) + def function3(self, a, b, c): + e = unpack_arg(a, at) + f = unpack_arg(b, bt) + g = unpack_arg(c, ct) + return func(self, e, f, g) + def function4(self, a, b): + e = unpack_arg(a, at) + f = unpack_arg(b, bt) + g = unpack_arg(c, ct) + h = unpack_arg(d, dt) + return func(self, e, f, g, h) + if len(argtypes) == 0: + function = function0 + elif len(argtypes) == 1: + function = function1 + elif len(argtypes) == 2: + function = function2 + elif len(argtypes) == 3: + function = function3 + elif len(argtypes) == 4: + function = function4 + else: + assert 0, "implement function for argtypes %s" % (argtypes,) function.__name__ = mnemonic return function diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py --- a/rpython/jit/backend/zarch/runner.py +++ b/rpython/jit/backend/zarch/runner.py @@ -73,3 +73,8 @@ cast_ptr_to_int._annspecialcase_ = 'specialize:arglltype(0)' cast_ptr_to_int = staticmethod(cast_ptr_to_int) + def build_regalloc(self): + ''' NOT_RPYTHON: for tests ''' + from rpython.jit.backend.zarch.regalloc import Regalloc + assert self.assembler is not None + return Regalloc(self.assembler) From pypy.commits at gmail.com Mon Jan 11 05:52:22 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 11 Jan 2016 02:52:22 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56938966.ccaa1c0a.51e03.577a@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r691:5c4b20a06a45 Date: 2016-01-11 11:52 +0100 http://bitbucket.org/pypy/pypy.org/changeset/5c4b20a06a45/ Log: update the values diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $52771 of $60000 (88.0%) + $53142 of $60000 (88.6%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30322 of $80000 (37.9%) + $30344 of $80000 (37.9%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Mon Jan 11 06:36:17 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 11 Jan 2016 03:36:17 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: small fixes Message-ID: <569393b1.6217c20a.2560b.6b8d@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81669:80db53f41aa2 Date: 2016-01-11 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/80db53f41aa2/ Log: small fixes diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -19,9 +19,14 @@ from rpython.rlib import rvmprof class MyCode: - pass + def __init__(self, name): + self.name = name + def get_name(code): - return 'py:code:52:x' + return code.name + + code2 = MyCode("py:y:foo:4") + try: rvmprof.register_code_object_class(MyCode, get_name) except rvmprof.VMProfPlatformUnsupported, e: @@ -40,15 +45,15 @@ while i < num: driver.jit_merge_point(code=code, i=i, s=s, num=num) s += (i << 1) - if s % 32423423423 == 0 and s > 0 == 0: - print s + if s % 3 == 0 and code is not code2: + main(code2, 100) i += 1 return s tmpfilename = str(udir.join('test_rvmprof')) def f(num): - code = MyCode() + code = MyCode("py:x:foo:3") rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) period = 0.0001 @@ -67,7 +72,7 @@ import pdb pdb.set_trace() - self.meta_interp(f, [100000000]) + self.meta_interp(f, [1000000]) try: import vmprof except ImportError: diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -33,7 +33,7 @@ self._code_classes = set() self._gather_all_code_objs = lambda: None self._cleanup_() - self._code_unique_id = 0 + self._code_unique_id = 4 self.cintf = cintf.setup() def _cleanup_(self): diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -154,16 +154,15 @@ if (stack->kind == VMPROF_CODE_TAG) { result[n] = stack->kind; result[n + 1] = stack->value; - stack = stack->next; n += 2; } #ifdef PYPY_JIT_CODEMAP else if (stack->kind == VMPROF_JITTED_TAG) { pc = ((intptr_t*)(stack->value - sizeof(intptr_t)))[0]; n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); - stack = stack->next; } #endif + stack = stack->next; } return n; } From pypy.commits at gmail.com Mon Jan 11 07:56:02 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 04:56:02 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: push pop from jitframe optimization (using store/load multiple instruction on s390x) + test Message-ID: <5693a662.a453c20a.c8259.ffff8ba3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81670:6c8cea4f1f86 Date: 2016-01-11 13:52 +0100 http://bitbucket.org/pypy/pypy/changeset/6c8cea4f1f86/ Log: push pop from jitframe optimization (using store/load multiple instruction on s390x) + test diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -512,7 +512,7 @@ # registers). mc = InstrBuilder() # - # mc.STG(r.r14, l.addr(14*WORD, r.SP)) + mc._push_core_regs_to_jitframe([r.r14]) # store the link on the jit frame # Do the call mc.push_std_frame() mc.LGR(r.r2, r.SP) @@ -527,6 +527,7 @@ # else we have an exception mc.cmp_op(r.SCRATCH, l.imm(0), imm=True) # + mc._pop_core_regs_from_jitframe([r.r14]) # restore the link on the jit frame # So we return to our caller, conditionally if "EQ" # mc.LG(r.r14, l.addr(14*WORD, r.SP)) mc.BCR(c.EQ, r.r14) @@ -551,16 +552,14 @@ endaddr, lengthaddr, _ = self.cpu.insert_stack_check() diff = lengthaddr - endaddr assert check_imm_value(diff) - xxx mc = self.mc - mc.load_imm(r.SCRATCH, self.stack_check_slowpath) - mc.load_imm(r.SCRATCH2, endaddr) # li r2, endaddr - mc.mtctr(r.SCRATCH.value) - mc.load(r.SCRATCH.value, r.SCRATCH2.value, 0) # ld r0, [end] - mc.load(r.SCRATCH2.value, r.SCRATCH2.value, diff)# ld r2, [length] - mc.subf(r.SCRATCH.value, r.SP.value, r.SCRATCH.value) # sub r0, SP - mc.cmp_op(0, r.SCRATCH.value, r.SCRATCH2.value, signed=False) + mc.load_imm(r.SCRATCH2, endaddr) # li r0, endaddr + mc.branch_absolute(self.stack_check_slowpath) + mc.load(r.SCRATCH, r.SCRATCH2, 0) # lg r1, [end] + mc.load(r.SCRATCH2, r.SCRATCH2, diff)# lg r0, [length] + mc.SGR(r.SCRATCH, r.SP) # sub r1, SP + mc.cmp_op(r.SCRATCH, r.SCRATCH2, signed=False) mc.bgtctrl() def _check_frame_depth(self, mc, gcmap): @@ -1057,21 +1056,71 @@ mc.MOVSD_bx((ofs + i * coeff) * WORD + base_ofs, i) def _push_core_regs_to_jitframe(self, mc, includes=r.registers): + self._multiple_to_or_from_jitframe(mc, includes, store=True) + + @specialize.arg(3) + def _multiple_to_or_from_jitframe(self, mc, includes, store): if len(includes) == 0: return base_ofs = self.cpu.get_baseofs_of_frame_field() - base = includes[0].value + if len(includes) == 1: + iv = includes[0] + addr = l.addr(base_ofs + iv.value * WORD, r.SPP) + if store: + mc.STG(iv, addr) + else: + mc.LG(iv, addr) + return + val = includes[0].value - for register in includes: - if register.value != val: - break - val += 1 + # includes[i => j] + # for each continous sequence in the registers are stored + # with STMG instead of STG, in the best case this only leads + # to 1 instruction to store r.ri -> r.rj (if it is continuous) + i = 0 + j = 1 + for register in includes[1:]: + if i >= j: + j += 1 + continue + regval = register.value + if regval != (val+1): + iv = includes[i] + diff = (val - iv.value) + addr = l.addr(base_ofs + iv.value * WORD, r.SPP) + if diff > 0: + if store: + mc.STMG(iv, includes[i+diff], addr) + else: + mc.LMG(iv, includes[i+diff], addr) + i = j + else: + if store: + mc.STG(iv, addr) + else: + mc.LG(iv, addr) + i = j + val = regval + j += 1 + if i >= len(includes): + # all have been stored + return + diff = (val - includes[i].value) + iv = includes[i] + addr = l.addr(base_ofs + iv.value * WORD, r.SPP) + if diff > 0: + if store: + mc.STMG(iv, includes[-1], addr) + else: + mc.LMG(iv, includes[-1], addr) else: - mc.STMG(includes[0], includes[-1], l.addr(base_ofs + base * WORD, r.SPP)) - return - # unordered! - for register in includes: - mc.STG(register, l.addr(base_ofs + register.value * WORD, r.SPP)) + if store: + mc.STG(iv, addr) + else: + mc.LG(iv, addr) + + def _pop_core_regs_from_jitframe(self, mc, includes=r.MANAGED_REGS): + self._multiple_to_or_from_jitframe(mc, includes, store=False) def _push_fp_regs_to_jitframe(self, mc, includes=r.fpregisters): if len(includes) == 0: @@ -1081,11 +1130,6 @@ for i,reg in enumerate(includes): mc.STDY(reg, l.addr(base_ofs + (v+i) * WORD, r.SPP)) - def _pop_core_regs_from_jitframe(self, mc, includes=r.MANAGED_REGS): - base_ofs = self.cpu.get_baseofs_of_frame_field() - for reg in includes: - mc.LG(reg, l.addr(base_ofs + reg.value * WORD, r.SPP)) - def _pop_fp_regs_from_jitframe(self, mc, includes=r.MANAGED_FP_REGS): base_ofs = self.cpu.get_baseofs_of_frame_field() v = 16 diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -673,7 +673,6 @@ self._emit_guard(op, arglocs[1:]) def emit_guard_subclass(self, op, arglocs, regalloc): - xxx assert self.cpu.supports_guard_gc_type loc_object = arglocs[0] loc_check_against_class = arglocs[1] @@ -681,10 +680,10 @@ offset2 = self.cpu.subclassrange_min_offset if offset is not None: # read this field to get the vtable pointer - self.mc(r.SCRATCH2, l.addr(offset, loc_object)) + self.mc.LG(r.SCRATCH2, l.addr(offset, loc_object)) # read the vtable's subclassrange_min field assert check_imm(offset2) - self.mc.ld(r.SCRATCH2.value, r.SCRATCH2.value, offset2) + self.mc.LG(r.SCRATCH2.value, r.SCRATCH2.value, offset2) else: # read the typeid self._read_typeid(r.SCRATCH, loc_object) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -2,6 +2,7 @@ from rpython.jit.backend.zarch import locations as l from rpython.jit.metainterp.history import (INT, REF, FLOAT, TargetToken) +from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.resoperation import rop from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.jit.backend.zarch.arch import (WORD, @@ -36,7 +37,6 @@ self.reserve_literal(8) elif op.getopnum() == rop.LABEL: descr = op.getdescr() - descr._ll_loop_pool = self.pool_start if descr not in asm.target_tokens_currently_compiling: # this is a 'long' jump instead of a relative jump self.offset_map[descr] = self.size @@ -121,12 +121,10 @@ self.pool_start = asm.mc.get_relative_pos() for op in operations: self.ensure_can_hold_constants(asm, op) - if self.size == 0 and written != 0: + if self.size == 0: # no pool needed! return - assert self.size % 2 == 0 - #if self.size % 2 == 1: - # self.size += 1 + assert self.size % 2 == 0, "not aligned properly" asm.mc.write('\x00' * self.size) written = 0 if self.constant_64_ones != -1: @@ -146,7 +144,8 @@ self.constant_max_64_positive = self.size written += 8 self.size += written - print "pool with %d quad words" % (self.size // 8) + if not we_are_translated(): + print "pool with %d quad words" % (self.size // 8) def overwrite_64(self, mc, index, value): index += self.pool_start @@ -165,7 +164,8 @@ if self.size == 0: return for val, offset in self.offset_map.items(): - print val, offset + if not we_are_translated(): + print('pool: %s at offset: %d' % (val, offset)) if val.is_constant(): if val.type == FLOAT: self.overwrite_64(mc, offset, float2longlong(val.value)) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -370,3 +370,72 @@ self.mc.LGHI(reg.r2, loc.imm(1)) self.a.jmpto(reg.r14) assert run_asm(self.a) == 1 + + def pushpop_jitframe(self, registers): + self.a._push_core_regs_to_jitframe(self.mc, registers) + self.a._pop_core_regs_from_jitframe(self.mc, registers) + + def test_pushpop_jitframe_multiple_optimization(self): + stored = [] + loaded = [] + def STMG(start, end, addr): + stored.append((start, end)) + def STG(reg, addr): + stored.append((reg,)) + def LMG(start, end, addr): + loaded.append((start, end)) + def LG(reg, addr): + loaded.append((reg,)) + self.mc.STMG = STMG + self.mc.STG = STG + self.mc.LMG = LMG + self.mc.LG = LG + + r = reg + + # two sequences 10-11, 13-14 + self.pushpop_jitframe([r.r10, r.r11, r.r13, r.r14]) + assert stored == [(r.r10, r.r11), (r.r13, r.r14)] + assert stored == loaded + stored = [] + loaded = [] + + # one sequence and on single + self.pushpop_jitframe([r.r0, r.r1, r.r3]) + assert stored == [(r.r0, r.r1), (r.r3,)] + assert stored == loaded + stored = [] + loaded = [] + + # single items + self.pushpop_jitframe(r.registers[::2]) + assert stored == [(x,) for x in r.registers[::2]] + assert stored == loaded + stored = [] + loaded = [] + + # large sequence 0-5 and one hole between + self.pushpop_jitframe([r.r0, r.r1, r.r2, r.r3, + r.r4, r.r5, r.r12, r.r13]) + assert stored == [(r.r0, r.r5), (r.r12, r.r13)] + assert stored == loaded + stored = [] + loaded = [] + + # ensure there is just on instruction for the 'best case' + self.pushpop_jitframe(r.registers) + assert stored == [(r.r0, r.r15)] + assert stored == loaded + stored = [] + loaded = [] + + # just one single + for r in [r.r14, r.r0, r.r1, r.r15]: + self.pushpop_jitframe([r]) + assert stored == [(r,)] + assert stored == loaded + stored = [] + loaded = [] + + + From pypy.commits at gmail.com Mon Jan 11 07:56:04 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 04:56:04 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added case where parameters are not ordered Message-ID: <5693a664.465fc20a.c35be.ffff9998@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81671:4f539fd728fc Date: 2016-01-11 13:55 +0100 http://bitbucket.org/pypy/pypy/changeset/4f539fd728fc/ Log: added case where parameters are not ordered diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -430,12 +430,19 @@ loaded = [] # just one single - for r in [r.r14, r.r0, r.r1, r.r15]: - self.pushpop_jitframe([r]) - assert stored == [(r,)] + for x in [r.r14, r.r0, r.r1, r.r15]: + self.pushpop_jitframe([x]) + assert stored == [(x,)] assert stored == loaded stored = [] loaded = [] + # unordered + self.pushpop_jitframe([r.r14, r.r8, r.r4, r.r0]) + assert stored == [(r.r14,), (r.r8,), (r.r4,), (r.r0,)] + assert stored == loaded + stored = [] + loaded = [] + From pypy.commits at gmail.com Mon Jan 11 08:28:28 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 05:28:28 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: guard_subclass, guard_is_object ported to s390x Message-ID: <5693adfc.53ad1c0a.2a38.2f37@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81672:39f75f1f6c6b Date: 2016-01-11 14:27 +0100 http://bitbucket.org/pypy/pypy/changeset/39f75f1f6c6b/ Log: guard_subclass, guard_is_object ported to s390x simplified the code in int_shift_left (use some old code of the regalloc) impl of stack check slowpath and stack check diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -529,12 +529,11 @@ # mc._pop_core_regs_from_jitframe([r.r14]) # restore the link on the jit frame # So we return to our caller, conditionally if "EQ" - # mc.LG(r.r14, l.addr(14*WORD, r.SP)) mc.BCR(c.EQ, r.r14) # # Else, jump to propagate_exception_path assert self.propagate_exception_path - mc.b_abs(self.propagate_exception_path) + mc.branch_absolute(self.propagate_exception_path) # rawstart = mc.materialize(self.cpu, []) self.stack_check_slowpath = rawstart diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -112,15 +112,10 @@ # in the addr part of the instruction l1 = addr(a1.getint()) else: - self.rm.ensure_in_reg(a1, r.SCRATCH) - l1 = addr(0, r.SCRATCH) - l0 = self.ensure_reg(a0) - if l0.is_in_pool(): - loc = self.force_allocate_reg(op) - self.assembler.mc.LG(loc, l0) - l0 = loc - else: - self.force_result_in_reg(op, a0) + tmp = self.rm.ensure_reg(a1, force_in_reg=True) + l1 = addr(0, tmp) + l0 = self.ensure_reg(a0, force_in_reg=True) + self.force_result_in_reg(op, a0) self.free_op_vars() return [l0, l1] diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -634,10 +634,7 @@ # Note that the typeid half-word is at offset 0 on a little-endian # machine; it is at offset 2 or 4 on a big-endian machine. assert self.cpu.supports_guard_gc_type - if IS_PPC_32: - self.mc.lhz(targetreg.value, loc_ptr.value, 2 * IS_BIG_ENDIAN) - else: - self.mc.lwz(targetreg.value, loc_ptr.value, 4 * IS_BIG_ENDIAN) + self.mc.LGF(targetreg, l.addr(4, loc_ptr)) def _cmp_guard_gc_type(self, loc_ptr, expected_typeid): self._read_typeid(r.SCRATCH2, loc_ptr) @@ -666,9 +663,11 @@ self._read_typeid(r.SCRATCH2, loc_object) self.mc.load_imm(r.SCRATCH, base_type_info + infobits_offset) - assert shift_by == 0 # on PPC64; fixme for PPC32 - self.mc.lbzx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value) - self.mc.andix(r.SCRATCH2.value, r.SCRATCH2.value, IS_OBJECT_FLAG & 0xff) + assert shift_by == 0 + self.mc.AGR(r.SCRATCH, r.SCRATCH2) + self.mc.LLGC(r.SCRATCH2, l.addr(0, r.SCRATCH)) + self.mc.LGHI(r.SCRATCH, l.imm(IS_OBJECT_FLAG & 0xff)) + self.mc.NGR(r.SCRATCH2, r.SCRATCH) self.guard_success_cc = c.NE self._emit_guard(op, arglocs[1:]) @@ -683,7 +682,7 @@ self.mc.LG(r.SCRATCH2, l.addr(offset, loc_object)) # read the vtable's subclassrange_min field assert check_imm(offset2) - self.mc.LG(r.SCRATCH2.value, r.SCRATCH2.value, offset2) + self.mc.load(r.SCRATCH2, r.SCRATCH2, offset2) else: # read the typeid self._read_typeid(r.SCRATCH, loc_object) @@ -692,8 +691,11 @@ base_type_info, shift_by, sizeof_ti = ( self.cpu.gc_ll_descr.get_translated_info_for_typeinfo()) self.mc.load_imm(r.SCRATCH2, base_type_info + sizeof_ti + offset2) - assert shift_by == 0 # on PPC64; fixme for PPC32 - self.mc.ldx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value) + assert shift_by == 0 + # add index manually + # we cannot use r0 in l.addr(...) + self.mc.AGR(r.SCRATCH, r.SCRATCH2) + self.mc.load(r.SCRATCH2, r.SCRATCH, 0) # get the two bounds to check against vtable_ptr = loc_check_against_class.getint() vtable_ptr = rffi.cast(rclass.CLASSTYPE, vtable_ptr) @@ -706,8 +708,8 @@ assert 0 <= check_min <= 0x7fff assert 0 <= check_diff <= 0xffff # check by doing the unsigned comparison (tmp - min) < (max - min) - self.mc.subi(r.SCRATCH2.value, r.SCRATCH2.value, check_min) - self.mc.cmp_op(0, r.SCRATCH2.value, check_diff, imm=True, signed=False) + self.mc.AGHI(r.SCRATCH2, l.imm(-check_min)) + self.mc.cmp_op(r.SCRATCH2, l.imm(check_diff), imm=True, signed=False) # the guard passes if we get a result of "below or equal" self.guard_success_cc = c.LE self._emit_guard(op, arglocs[2:]) @@ -831,7 +833,7 @@ addr_loc = l.addr(offset_loc.value, base_loc, index_loc) else: self.mc.LGR(r.SCRATCH, index_loc) - slef.mc.AGR(r.SCRATCH, offset_loc) + self.mc.AGR(r.SCRATCH, offset_loc) addr_loc = l.addr(0, base_loc, r.SCRATCH) self._memory_read(result_loc, addr_loc, size_loc.value, sign_loc.value) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -286,31 +286,12 @@ raise NoVariableToSpill() return even, odd - def ensure_in_reg(self, var, reg): - """ opposed to ensure_reg, this loads the contents of the variable - directly into reg """ - if isinstance(var, ConstInt): - if -2**15 <= var.value and var.value <= 2*15-1: - self.assembler.mc.LGHI(reg, l.imm(var.value)) - elif -2**31 <= var.value and var.value <= 2*31-1: - self.assembler.mc.LGFI(reg, l.imm(var.value)) - else: - poolloc = self.ensure_reg(a1) - self.assembler.mc.LG(reg, poolloc) - else: - loc = self.loc(var, must_exist=True) - if loc is not reg: - self.assembler.regalloc_mov(loc, reg) - return reg - def force_result_in_even_reg(self, result_v, loc, forbidden_vars=[]): pass def force_result_in_odd_reg(self, result_v, loc, forbidden_vars=[]): pass - - class ZARCHFrameManager(FrameManager): def __init__(self, base_ofs): FrameManager.__init__(self) From pypy.commits at gmail.com Mon Jan 11 10:30:10 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 07:30:10 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: some more translation issues solved Message-ID: <5693ca82.08e11c0a.ce9e2.5624@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81673:c296d7fff786 Date: 2016-01-11 16:25 +0100 http://bitbucket.org/pypy/pypy/changeset/c296d7fff786/ Log: some more translation issues solved diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -32,6 +32,9 @@ from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref from rpython.rlib.jit import AsmInfo +class JitFrameTooDeep(Exception): + pass + class AssemblerZARCH(BaseAssembler, OpAssembler): def __init__(self, cpu, translate_support_code=False): @@ -835,10 +838,9 @@ return # move immediate value to memory elif loc.is_stack(): - with scratch_reg(self.mc): - offset = loc.value - self.mc.load_imm(r.SCRATCH, prev_loc) - self.mc.STG(r.SCRATCH, l.addr(offset, r.SPP)) + offset = loc.value + self.mc.load_imm(r.SCRATCH, prev_loc.value) + self.mc.STG(r.SCRATCH, l.addr(offset, r.SPP)) return assert 0, "not supported location" elif prev_loc.is_in_pool(): @@ -858,9 +860,8 @@ # move in memory elif loc.is_stack(): target_offset = loc.value - with scratch_reg(self.mc): - self.mc.load(r.SCRATCH.value, r.SPP, offset) - self.mc.store(r.SCRATCH.value, r.SPP, target_offset) + self.mc.load(r.SCRATCH, r.SPP, offset) + self.mc.store(r.SCRATCH, r.SPP, target_offset) return # move from memory to fp register elif loc.is_fp_reg(): @@ -879,23 +880,16 @@ self.mc.STG(prev_loc, l.addr(offset, r.SPP)) return assert 0, "not supported location" - elif prev_loc.is_imm_float(): - value = prev_loc.getint() + elif prev_loc.is_in_pool(): # move immediate value to fp register if loc.is_fp_reg(): - xxx - with scratch_reg(self.mc): - self.mc.load_imm(r.SCRATCH, value) - self.mc.lfdx(loc.value, 0, r.SCRATCH.value) + self.LD(loc, prev_loc) return # move immediate value to memory elif loc.is_stack(): - xxx - with scratch_reg(self.mc): - offset = loc.value - self.mc.load_imm(r.SCRATCH, value) - self.mc.lfdx(r.FP_SCRATCH.value, 0, r.SCRATCH.value) - self.mc.stfd(r.FP_SCRATCH.value, r.SPP.value, offset) + offset = loc.value + self.mc.LD(r.FP_SCRATCH, prev_loc) + self.mc.STDY(r.FP_SCRATCH, l.addr(offset, r.SPP)) return assert 0, "not supported location" elif prev_loc.is_fp_reg(): diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -448,7 +448,9 @@ else: return arg unpack_arg._annspecialcase_ = 'specialize:arg(1)' - argtypes = func._arguments_ + argtypes = func._arguments_[:] + #while len(argtypes) > 0 and argtypes[-1] == '-': + # argtypes.pop() at = argtypes[0] if len(argtypes) >= 1 else '-' bt = argtypes[1] if len(argtypes) >= 2 else '-' ct = argtypes[2] if len(argtypes) >= 3 else '-' @@ -467,7 +469,7 @@ f = unpack_arg(b, bt) g = unpack_arg(c, ct) return func(self, e, f, g) - def function4(self, a, b): + def function4(self, a, b, c, d): e = unpack_arg(a, at) f = unpack_arg(b, bt) g = unpack_arg(c, ct) diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -97,31 +97,6 @@ def is_imm(self): return True -class ConstFloatLoc(AssemblerLocation): - """This class represents an imm float value which is stored in memory at - the address stored in the field value""" - _immutable_ = True - width = WORD - type = FLOAT - - def __init__(self, value): - self.value = value - - def getint(self): - return self.value - - def __repr__(self): - return "imm_float(stored at %d)" % (self.value) - - def is_imm_float(self): - return True - - def as_key(self): # a real address + 1 - return self.value | 1 - - def is_float(self): - return True - class StackLocation(AssemblerLocation): _immutable_ = True diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -21,6 +21,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (FLOAT, INT, REF, VOID) from rpython.jit.metainterp.resoperation import rop +from rpython.rtyper import rclass from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rlib.objectmodel import we_are_translated @@ -875,7 +876,7 @@ addr_loc = l.addr(offset_loc.value, base_loc, index_loc) else: self.mc.LGR(helper_reg, index_loc) - slef.mc.AGR(helper_reg, offset_loc) + self.mc.AGR(helper_reg, offset_loc) addr_loc = l.addr(0, base_loc, helper_reg) return addr_loc diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -81,7 +81,7 @@ def place_in_pool(self, var): offset = self.assembler.pool.get_offset(var) - return l.pool(offset, r.POOL) + return l.pool(offset, float=True) def ensure_reg(self, box, force_in_reg): if isinstance(box, Const): @@ -566,7 +566,7 @@ def ensure_reg_or_any_imm(self, box): if box.type == FLOAT: - return self.fprm.ensure_reg(box) + return self.fprm.ensure_reg(box, True) else: if isinstance(box, Const): return imm(box.getint()) From pypy.commits at gmail.com Mon Jan 11 11:37:23 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 08:37:23 -0800 (PST) Subject: [pypy-commit] buildbot s390x-buildbot: added s390x to config Message-ID: <5693da43.62f3c20a.95bad.7d6c@mx.google.com> Author: Richard Plangger Branch: s390x-buildbot Changeset: r975:df8ff01c8d56 Date: 2016-01-11 17:36 +0100 http://bitbucket.org/pypy/buildbot/changeset/df8ff01c8d56/ Log: added s390x to config diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -28,6 +28,7 @@ 'linux': 100, 'linux64': 50, 'osx': 30, + 's390x': 20, 'win32': 10, 'linux_armhf_raspbian': 7, 'linux_armhf_raring': 6, @@ -41,6 +42,7 @@ 'osx64': 'macosx-x86-64', 'win32': 'win-x86-32', 'freebsd64': 'freebsd-9-x86-64', + 's390x': 's390-64', } DESCRIPTIONS = { From pypy.commits at gmail.com Mon Jan 11 11:39:49 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 08:39:49 -0800 (PST) Subject: [pypy-commit] buildbot s390x-buildbot: include linux in the value of the platform dict Message-ID: <5693dad5.02371c0a.a53b0.2e88@mx.google.com> Author: Richard Plangger Branch: s390x-buildbot Changeset: r976:aa640461e854 Date: 2016-01-11 17:39 +0100 http://bitbucket.org/pypy/buildbot/changeset/aa640461e854/ Log: include linux in the value of the platform dict diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -42,7 +42,7 @@ 'osx64': 'macosx-x86-64', 'win32': 'win-x86-32', 'freebsd64': 'freebsd-9-x86-64', - 's390x': 's390-64', + 's390x': 'linux-s390-64', } DESCRIPTIONS = { From pypy.commits at gmail.com Mon Jan 11 12:04:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 09:04:44 -0800 (PST) Subject: [pypy-commit] buildbot s390x-buildbot: added more config: Message-ID: <5693e0ac.87c21c0a.239d5.ffff809b@mx.google.com> Author: Richard Plangger Branch: s390x-buildbot Changeset: r977:97612101816f Date: 2016-01-11 18:04 +0100 http://bitbucket.org/pypy/buildbot/changeset/97612101816f/ Log: added more config: own, app-level, jit, pylib for the s390x build slaves. named the slave 'ibm-research' diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -64,6 +64,9 @@ pypyTranslatedAppLevelTestFactory64 = pypybuilds.Translated(lib_python=True, app_tests=True, platform='linux64') +pypyTranslatedAppLevelTestFactoryS390X = pypybuilds.Translated(lib_python=True, + app_tests=True, + platform='s390x') # these are like the two above: the only difference is that they only run # lib-python tests,not -A tests @@ -146,6 +149,15 @@ app_tests=True, ) +pypyJITTranslatedTestFactoryS390X = pypybuilds.Translated( + platform='s390x', + translationArgs=jit_translation_args, + targetArgs=[], + lib_python=True, + pypyjit=True, + app_tests=True, + ) + pypyJITBenchmarkFactory_tannit = pypybuilds.JITBenchmark(host='tannit') pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', host='tannit', @@ -162,18 +174,22 @@ LINUX32 = "own-linux-x86-32" LINUX64 = "own-linux-x86-64" +LINUX_S390X = "own-linux-s390x" MACOSX32 = "own-macosx-x86-32" WIN32 = "own-win-x86-32" APPLVLLINUX32 = "pypy-c-app-level-linux-x86-32" APPLVLLINUX64 = "pypy-c-app-level-linux-x86-64" +APPLVLLINUX_S390X = "pypy-c-app-level-linux-s390x" APPLVLWIN32 = "pypy-c-app-level-win-x86-32" LIBPYTHON_LINUX32 = "pypy-c-lib-python-linux-x86-32" LIBPYTHON_LINUX64 = "pypy-c-lib-python-linux-x86-64" +LIBPYTHON_LINUX_S390X = "pypy-c-lib-python-linux-s390x" JITLINUX32 = "pypy-c-jit-linux-x86-32" JITLINUX64 = "pypy-c-jit-linux-x86-64" +JITLINUX_S390X = 'pypy-c-jit-linux-s390x' JITMACOSX64 = "pypy-c-jit-macosx-x86-64" #JITMACOSX64_2 = "pypy-c-jit-macosx-x86-64-2" JITWIN32 = "pypy-c-jit-win-x86-32" @@ -184,6 +200,7 @@ CPYTHON_64 = "cpython-2-benchmark-x86-64" NUMPY_64 = "numpy-compatibility-linux-x86-64" NUMPY_WIN = "numpy-compatibility-win-x86-32" + # buildbot builder PYPYBUILDBOT = 'pypy-buildbot' JITFREEBSD964 = 'pypy-c-jit-freebsd-9-x86-64' @@ -255,10 +272,13 @@ # linux tests LINUX32, # on tannit32, uses all cores LINUX64, # on speed-old, uses all cores + LINUX_S390X, # vm (ibm-research) JITLINUX32, # on tannit32, uses 1 core JITLINUX64, # on speed-old, uses 1 core + JITLINUX_S390X, # vm (ibm-research) #APPLVLLINUX32, # on tannit32, uses 1 core APPLVLLINUX64, # on speed-old, uses 1 core + APPLVLLINUX_S390X, # vm (ibm-research) # other platforms #MACOSX32, # on minime JITWIN32, # on allegro_win32, SalsaSalsa @@ -302,18 +322,22 @@ PYPYBUILDBOT, LINUX32, LINUX64, + LINUX_S390X, MACOSX32, WIN32, APPLVLLINUX32, APPLVLLINUX64, + APPLVLLINUX_S390X, APPLVLWIN32, LIBPYTHON_LINUX32, LIBPYTHON_LINUX64, + LIBPYTHON_LINUX_S390X, JITLINUX32, JITLINUX64, + JITLINUX_S390X, JITMACOSX64, JITWIN32, JITFREEBSD964, @@ -355,6 +379,13 @@ "category": 'linux64', #"locks": [TannitCPU.access('counting')], }, + {"name": LINUX_S390X, + "slavenames": ["ibm-research"], + "builddir": LINUX_S390X, + "factory": pypyOwnTestFactory, + "category": 's390x', + #"locks": [TannitCPU.access('counting')], + }, {"name": APPLVLLINUX32, #"slavenames": ["allegro32"], "slavenames": ["tannit32"], @@ -370,6 +401,13 @@ "category": "linux64", #"locks": [TannitCPU.access('counting')], }, + {"name": APPLVLLINUX_S390X, + "slavenames": ["ibm-research"], + "builddir": APPLVLLINUX_S390X, + "factory": pypyTranslatedAppLevelTestFactoryS390X, + "category": "s390x", + #"locks": [TannitCPU.access('counting')], + }, {"name": LIBPYTHON_LINUX32, "slavenames": ["tannit32"], #"slavenames": ["allegro32"], @@ -385,6 +423,13 @@ "category": "linux64", #"locks": [TannitCPU.access('counting')], }, + {"name": LIBPYTHON_LINUX_S390X, + "slavenames": ["ibm-research"], + "builddir": LIBPYTHON_LINUX_S390X, + "factory": pypyTranslatedLibPythonTestFactory, + "category": "s390x", + #"locks": [TannitCPU.access('counting')], + }, {"name" : JITLINUX32, #"slavenames": ["allegro32"], "slavenames": ["tannit32"], @@ -400,6 +445,13 @@ 'category': 'linux64', #"locks": [TannitCPU.access('counting')], }, + {'name': JITLINUX_S390X, + 'slavenames': ["ibm-research"], + 'builddir': JITLINUX_S390X, + 'factory': pypyJITTranslatedTestFactoryS390X, + 'category': 'linux-s390x', + #"locks": [TannitCPU.access('counting')], + }, {"name": JITBENCH, "slavenames": ["tannit32"], "builddir": JITBENCH, diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -42,7 +42,7 @@ 'osx64': 'macosx-x86-64', 'win32': 'win-x86-32', 'freebsd64': 'freebsd-9-x86-64', - 's390x': 'linux-s390-64', + 's390x': 'linux-s390x', } DESCRIPTIONS = { From pypy.commits at gmail.com Mon Jan 11 12:45:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 09:45:00 -0800 (PST) Subject: [pypy-commit] buildbot default: merged s390x changes Message-ID: <5693ea1c.2851c20a.ab8b3.ffffaec5@mx.google.com> Author: Richard Plangger Branch: Changeset: r978:659fc9f38ab8 Date: 2016-01-11 18:44 +0100 http://bitbucket.org/pypy/buildbot/changeset/659fc9f38ab8/ Log: merged s390x changes diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -64,6 +64,9 @@ pypyTranslatedAppLevelTestFactory64 = pypybuilds.Translated(lib_python=True, app_tests=True, platform='linux64') +pypyTranslatedAppLevelTestFactoryS390X = pypybuilds.Translated(lib_python=True, + app_tests=True, + platform='s390x') # these are like the two above: the only difference is that they only run # lib-python tests,not -A tests @@ -146,6 +149,15 @@ app_tests=True, ) +pypyJITTranslatedTestFactoryS390X = pypybuilds.Translated( + platform='s390x', + translationArgs=jit_translation_args, + targetArgs=[], + lib_python=True, + pypyjit=True, + app_tests=True, + ) + pypyJITBenchmarkFactory_tannit = pypybuilds.JITBenchmark(host='tannit') pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', host='tannit', @@ -162,18 +174,22 @@ LINUX32 = "own-linux-x86-32" LINUX64 = "own-linux-x86-64" +LINUX_S390X = "own-linux-s390x" MACOSX32 = "own-macosx-x86-32" WIN32 = "own-win-x86-32" APPLVLLINUX32 = "pypy-c-app-level-linux-x86-32" APPLVLLINUX64 = "pypy-c-app-level-linux-x86-64" +APPLVLLINUX_S390X = "pypy-c-app-level-linux-s390x" APPLVLWIN32 = "pypy-c-app-level-win-x86-32" LIBPYTHON_LINUX32 = "pypy-c-lib-python-linux-x86-32" LIBPYTHON_LINUX64 = "pypy-c-lib-python-linux-x86-64" +LIBPYTHON_LINUX_S390X = "pypy-c-lib-python-linux-s390x" JITLINUX32 = "pypy-c-jit-linux-x86-32" JITLINUX64 = "pypy-c-jit-linux-x86-64" +JITLINUX_S390X = 'pypy-c-jit-linux-s390x' JITMACOSX64 = "pypy-c-jit-macosx-x86-64" #JITMACOSX64_2 = "pypy-c-jit-macosx-x86-64-2" JITWIN32 = "pypy-c-jit-win-x86-32" @@ -184,6 +200,7 @@ CPYTHON_64 = "cpython-2-benchmark-x86-64" NUMPY_64 = "numpy-compatibility-linux-x86-64" NUMPY_WIN = "numpy-compatibility-win-x86-32" + # buildbot builder PYPYBUILDBOT = 'pypy-buildbot' JITFREEBSD964 = 'pypy-c-jit-freebsd-9-x86-64' @@ -255,10 +272,13 @@ # linux tests LINUX32, # on tannit32, uses all cores LINUX64, # on speed-old, uses all cores + LINUX_S390X, # vm (ibm-research) JITLINUX32, # on tannit32, uses 1 core JITLINUX64, # on speed-old, uses 1 core + JITLINUX_S390X, # vm (ibm-research) #APPLVLLINUX32, # on tannit32, uses 1 core APPLVLLINUX64, # on speed-old, uses 1 core + APPLVLLINUX_S390X, # vm (ibm-research) # other platforms #MACOSX32, # on minime JITWIN32, # on allegro_win32, SalsaSalsa @@ -302,18 +322,22 @@ PYPYBUILDBOT, LINUX32, LINUX64, + LINUX_S390X, MACOSX32, WIN32, APPLVLLINUX32, APPLVLLINUX64, + APPLVLLINUX_S390X, APPLVLWIN32, LIBPYTHON_LINUX32, LIBPYTHON_LINUX64, + LIBPYTHON_LINUX_S390X, JITLINUX32, JITLINUX64, + JITLINUX_S390X, JITMACOSX64, JITWIN32, JITFREEBSD964, @@ -355,6 +379,13 @@ "category": 'linux64', #"locks": [TannitCPU.access('counting')], }, + {"name": LINUX_S390X, + "slavenames": ["ibm-research"], + "builddir": LINUX_S390X, + "factory": pypyOwnTestFactory, + "category": 's390x', + #"locks": [TannitCPU.access('counting')], + }, {"name": APPLVLLINUX32, #"slavenames": ["allegro32"], "slavenames": ["tannit32"], @@ -370,6 +401,13 @@ "category": "linux64", #"locks": [TannitCPU.access('counting')], }, + {"name": APPLVLLINUX_S390X, + "slavenames": ["ibm-research"], + "builddir": APPLVLLINUX_S390X, + "factory": pypyTranslatedAppLevelTestFactoryS390X, + "category": "s390x", + #"locks": [TannitCPU.access('counting')], + }, {"name": LIBPYTHON_LINUX32, "slavenames": ["tannit32"], #"slavenames": ["allegro32"], @@ -385,6 +423,13 @@ "category": "linux64", #"locks": [TannitCPU.access('counting')], }, + {"name": LIBPYTHON_LINUX_S390X, + "slavenames": ["ibm-research"], + "builddir": LIBPYTHON_LINUX_S390X, + "factory": pypyTranslatedLibPythonTestFactory, + "category": "s390x", + #"locks": [TannitCPU.access('counting')], + }, {"name" : JITLINUX32, #"slavenames": ["allegro32"], "slavenames": ["tannit32"], @@ -400,6 +445,13 @@ 'category': 'linux64', #"locks": [TannitCPU.access('counting')], }, + {'name': JITLINUX_S390X, + 'slavenames': ["ibm-research"], + 'builddir': JITLINUX_S390X, + 'factory': pypyJITTranslatedTestFactoryS390X, + 'category': 'linux-s390x', + #"locks": [TannitCPU.access('counting')], + }, {"name": JITBENCH, "slavenames": ["tannit32"], "builddir": JITBENCH, diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -28,6 +28,7 @@ 'linux': 100, 'linux64': 50, 'osx': 30, + 's390x': 20, 'win32': 10, 'linux_armhf_raspbian': 7, 'linux_armhf_raring': 6, @@ -41,6 +42,7 @@ 'osx64': 'macosx-x86-64', 'win32': 'win-x86-32', 'freebsd64': 'freebsd-9-x86-64', + 's390x': 'linux-s390x', } DESCRIPTIONS = { From pypy.commits at gmail.com Mon Jan 11 12:45:02 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 09:45:02 -0800 (PST) Subject: [pypy-commit] buildbot s390x-buildbot: close branch Message-ID: <5693ea1e.2851c20a.ab8b3.ffffaec8@mx.google.com> Author: Richard Plangger Branch: s390x-buildbot Changeset: r979:7aadd1e862b1 Date: 2016-01-11 18:44 +0100 http://bitbucket.org/pypy/buildbot/changeset/7aadd1e862b1/ Log: close branch From pypy.commits at gmail.com Mon Jan 11 13:02:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 11 Jan 2016 10:02:31 -0800 (PST) Subject: [pypy-commit] buildbot default: renamed ibm-research to dje, added seperate nightly to test not default on s390x, but s390x-backend Message-ID: <5693ee37.cf821c0a.fe5ff.ffffae19@mx.google.com> Author: Richard Plangger Branch: Changeset: r980:158bfd82f5f8 Date: 2016-01-11 19:02 +0100 http://bitbucket.org/pypy/buildbot/changeset/158bfd82f5f8/ Log: renamed ibm-research to dje, added seperate nightly to test not default on s390x, but s390x-backend diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -272,13 +272,10 @@ # linux tests LINUX32, # on tannit32, uses all cores LINUX64, # on speed-old, uses all cores - LINUX_S390X, # vm (ibm-research) JITLINUX32, # on tannit32, uses 1 core JITLINUX64, # on speed-old, uses 1 core - JITLINUX_S390X, # vm (ibm-research) #APPLVLLINUX32, # on tannit32, uses 1 core APPLVLLINUX64, # on speed-old, uses 1 core - APPLVLLINUX_S390X, # vm (ibm-research) # other platforms #MACOSX32, # on minime JITWIN32, # on allegro_win32, SalsaSalsa @@ -291,6 +288,12 @@ PYPYBUILDBOT # on cobra ], branch='default', hour=0, minute=0), + Nightly("nightly-0-01", [ + LINUX_S390X, # vm (ibm-research) + JITLINUX_S390X, # vm (ibm-research) + APPLVLLINUX_S390X, # vm (ibm-research) + ], branch='s390x-backend', hour=2, minute=0), + Nightly("nightly-1-00", [ JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) @@ -380,7 +383,7 @@ #"locks": [TannitCPU.access('counting')], }, {"name": LINUX_S390X, - "slavenames": ["ibm-research"], + "slavenames": ["dje"], "builddir": LINUX_S390X, "factory": pypyOwnTestFactory, "category": 's390x', @@ -402,7 +405,7 @@ #"locks": [TannitCPU.access('counting')], }, {"name": APPLVLLINUX_S390X, - "slavenames": ["ibm-research"], + "slavenames": ["dje"], "builddir": APPLVLLINUX_S390X, "factory": pypyTranslatedAppLevelTestFactoryS390X, "category": "s390x", @@ -424,7 +427,7 @@ #"locks": [TannitCPU.access('counting')], }, {"name": LIBPYTHON_LINUX_S390X, - "slavenames": ["ibm-research"], + "slavenames": ["dje"], "builddir": LIBPYTHON_LINUX_S390X, "factory": pypyTranslatedLibPythonTestFactory, "category": "s390x", @@ -446,7 +449,7 @@ #"locks": [TannitCPU.access('counting')], }, {'name': JITLINUX_S390X, - 'slavenames': ["ibm-research"], + 'slavenames': ["dje"], 'builddir': JITLINUX_S390X, 'factory': pypyJITTranslatedTestFactoryS390X, 'category': 'linux-s390x', From pypy.commits at gmail.com Mon Jan 11 13:46:21 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 11 Jan 2016 10:46:21 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fix fix fix Message-ID: <5693f87d.82e11c0a.48504.4b1a@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81674:a1dd8454d500 Date: 2016-01-11 20:45 +0200 http://bitbucket.org/pypy/pypy/changeset/a1dd8454d500/ Log: fix fix fix diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -19,6 +19,7 @@ from rpython.rlib import rvmprof class MyCode: + _vmprof_unique_id = 0 def __init__(self, name): self.name = name @@ -26,14 +27,18 @@ return code.name code2 = MyCode("py:y:foo:4") + rvmprof.register_code(code2, get_name) try: rvmprof.register_code_object_class(MyCode, get_name) except rvmprof.VMProfPlatformUnsupported, e: py.test.skip(str(e)) + def get_unique_id(code): + return rvmprof.get_unique_id(code) + driver = JitDriver(greens = ['code'], reds = ['i', 's', 'num'], - is_recursive=True) + is_recursive=True, get_unique_id=get_unique_id) @rvmprof.vmprof_execute_code("xcode13", lambda code, num: code) def main(code, num): @@ -45,7 +50,7 @@ while i < num: driver.jit_merge_point(code=code, i=i, s=s, num=num) s += (i << 1) - if s % 3 == 0 and code is not code2: + if i % 3 == 0 and code is not code2: main(code2, 100) i += 1 return s @@ -72,7 +77,7 @@ import pdb pdb.set_trace() - self.meta_interp(f, [1000000]) + self.meta_interp(f, [1000000], inline=True) try: import vmprof except ImportError: diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -623,6 +623,8 @@ raise AttributeError("no 'greens' or 'reds' supplied") if virtualizables is not None: self.virtualizables = virtualizables + if get_unique_id is not None or is_recursive: + assert get_unique_id is not None and is_recursive, "get_unique_id and is_recursive must be specified at the same time" for v in self.virtualizables: assert v in self.reds # if reds are automatic, they won't be passed to jit_merge_point, so diff --git a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h +++ b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h @@ -11,7 +11,7 @@ #ifdef PYPY_JIT_CODEMAP void *codemap; long current_pos = 0; - intptr_t id; + intptr_t ident; long start_addr = 0; intptr_t addr = (intptr_t)ip; int start, k; @@ -28,21 +28,22 @@ result[n++] = start_addr; start = n; while (n < max_depth) { - id = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); - if (id == -1) + ident = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); + if (ident == -1) // finish break; - if (id == 0) + if (ident == 0) continue; // not main codemap result[n++] = VMPROF_JITTED_TAG; - result[n++] = id; + result[n++] = ident; } - k = 0; + k = 1; + while (k < (n - start) / 2) { tmp = result[start + k]; - result[start + k] = result[n - k - 1]; - result[n - k - 1] = tmp; - k++; + result[start + k] = result[n - k]; + result[n - k] = tmp; + k += 2; } #endif return n; From pypy.commits at gmail.com Mon Jan 11 15:48:58 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 11 Jan 2016 12:48:58 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <5694153a.6918c20a.deb73.4a6a@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81675:768918ab952a Date: 2016-01-11 20:25 +0100 http://bitbucket.org/pypy/pypy/changeset/768918ab952a/ Log: hg merge default diff too long, truncating to 2000 out of 29542 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ -all: pypy-c +all: pypy-c cffi_imports PYPY_EXECUTABLE := $(shell which pypy) URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") @@ -10,6 +10,8 @@ RUNINTERP = $(PYPY_EXECUTABLE) endif +.PHONY: cffi_imports + pypy-c: @echo @echo "====================================================================" @@ -36,3 +38,6 @@ # replaced with an opaque --jobserver option by the time this Makefile # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html + +cffi_imports: pypy-c + PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -18,9 +18,9 @@ assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} try: - from __pypy__ import reversed_dict + from __pypy__ import reversed_dict as _reversed_dict except ImportError: - reversed_dict = lambda d: reversed(d.keys()) + _reversed_dict = None # don't have ordered dicts try: from thread import get_ident as _get_ident @@ -46,7 +46,7 @@ ''' def __reversed__(self): - return reversed_dict(self) + return _reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. @@ -116,6 +116,178 @@ return ItemsView(self) +def _compat_with_unordered_dicts(): + # This returns the methods needed in OrderedDict in case the base + # 'dict' class is not actually ordered, like on top of CPython or + # old PyPy or PyPy-STM. + + # ===== Original comments and code follows ===== + # ===== The unmodified methods are not repeated ===== + + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + return dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, _ = self.__map.pop(key) + link_prev[1] = link_next # update link_prev[NEXT] + link_next[0] = link_prev # update link_next[PREV] + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root[1] # start at the first node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[1] # move to next node + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root[0] # start at the last node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[0] # move to previous node + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + dict.clear(self) + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) pairs in od' + for k in self: + yield (k, self[k]) + + update = MutableMapping.update + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + key = next(reversed(self) if last else iter(self)) + value = self.pop(key) + return key, value + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + return locals() + +if _reversed_dict is None: + for _key, _value in _compat_with_unordered_dicts().items(): + setattr(OrderedDict, _key, _value) + del _key, _value + ################################################################################ ### namedtuple ################################################################################ diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -8,13 +8,13 @@ def __init__(self): self._builder = StringBuilder() def append(self, string): - try: - self._builder.append(string) - except UnicodeEncodeError: + if (isinstance(string, unicode) and + type(self._builder) is StringBuilder): ub = UnicodeBuilder() ub.append(self._builder.build()) self._builder = ub - ub.append(string) + self.append = ub.append # shortcut only + self._builder.append(string) def build(self): return self._builder.build() diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -604,21 +604,8 @@ def uuid4(): """Generate a random UUID.""" - - # When the system provides a version-4 UUID generator, use it. - if _uuid_generate_random: - _buffer = ctypes.create_string_buffer(16) - _uuid_generate_random(_buffer) - return UUID(bytes=_buffer.raw) - - # Otherwise, get randomness from urandom or the 'random' module. - try: - import os - return UUID(bytes=os.urandom(16), version=4) - except: - import random - bytes = [chr(random.randrange(256)) for i in range(16)] - return UUID(bytes=bytes, version=4) + import os + return UUID(bytes=os.urandom(16), version=4) def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': @@ -559,6 +563,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +597,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.3.1 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.3.1" -__version_info__ = (1, 3, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,7 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) -#define _CFFI_NUM_EXPORTS 25 +#define _cffi_call_python \ + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) +#define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -201,8 +203,11 @@ the others follow */ } +/********** end CPython-specific section **********/ +#else +_CFFI_UNUSED_FN +static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); #endif -/********** end CPython-specific section **********/ #define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -72,6 +72,8 @@ self._cdefsources = [] self._included_ffis = [] self._windows_unicode = None + self._init_once_cache = {} + self._cdef_version = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -104,6 +106,7 @@ raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: + self._cdef_version = object() self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: @@ -589,14 +592,39 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.'): + def compile(self, tmpdir='.', verbose=0): from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, **kwds) + source_extension=source_extension, + compiler_verbose=verbose, **kwds) + + def init_once(self, func, tag): + # Read _init_once_cache[tag], which is either (False, lock) if + # we're calling the function now in some thread, or (True, result). + # Don't call setdefault() in most cases, to avoid allocating and + # immediately freeing a lock; but still use setdefaut() to avoid + # races. + try: + x = self._init_once_cache[tag] + except KeyError: + x = self._init_once_cache.setdefault(tag, (False, allocate_lock())) + # Common case: we got (True, result), so we return the result. + if x[0]: + return x[1] + # Else, it's a lock. Acquire it to serialize the following tests. + with x[1]: + # Read again from _init_once_cache the current status. + x = self._init_once_cache[tag] + if x[0]: + return x[1] + # Call the function and store the result back. + result = func() + self._init_once_cache[tag] = (True, result) + return result def _load_backend_lib(backend, name, flags): @@ -620,70 +648,70 @@ import os backend = ffi._backend backendlib = _load_backend_lib(backend, libname, flags) - copied_enums = [] # - def make_accessor_locked(name): + def accessor_function(name): key = 'function ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - try: - value = backendlib.load_function(BType, name) - except KeyError as e: - raise AttributeError('%s: %s' % (name, e)) - library.__dict__[name] = value + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + try: + value = backendlib.load_function(BType, name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) + library.__dict__[name] = value + # + def accessor_variable(name): + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: return # - key = 'variable ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - read_variable = backendlib.read_variable - write_variable = backendlib.write_variable - setattr(FFILibrary, name, property( - lambda self: read_variable(BType, name), - lambda self, value: write_variable(BType, name, value))) - return - # - if not copied_enums: - from . import model - error = None - for key, (tp, _) in ffi._parser._declarations.items(): - if not isinstance(tp, model.EnumType): - continue - try: - tp.check_not_partial() - except Exception as e: - error = e - continue - for enumname, enumval in zip(tp.enumerators, tp.enumvalues): - if enumname not in library.__dict__: - library.__dict__[enumname] = enumval - if error is not None: - if name in library.__dict__: - return # ignore error, about a different enum - raise error - - for key, val in ffi._parser._int_constants.items(): - if key not in library.__dict__: - library.__dict__[key] = val - - copied_enums.append(True) - if name in library.__dict__: - return - # - key = 'constant ' + name - if key in ffi._parser._declarations: - raise NotImplementedError("fetching a non-integer constant " - "after dlopen()") - # - raise AttributeError(name) + from . import model + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, accessor_int_constant) + accessors_version[0] = ffi._cdef_version # def make_accessor(name): with ffi._lock: if name in library.__dict__ or name in FFILibrary.__dict__: return # added by another thread while waiting for the lock - make_accessor_locked(name) + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) # class FFILibrary(object): def __getattr__(self, name): @@ -697,6 +725,10 @@ setattr(self, name, value) else: property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() # if libname is not None: try: diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py --- a/lib_pypy/cffi/cffi_opcode.py +++ b/lib_pypy/cffi/cffi_opcode.py @@ -54,6 +54,7 @@ OP_DLOPEN_FUNC = 35 OP_DLOPEN_CONST = 37 OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 PRIM_VOID = 0 PRIM_BOOL = 1 diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -29,6 +29,7 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") +_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -80,6 +81,47 @@ parts.append(csource) return ''.join(parts) +def _preprocess_extern_python(csource): + # input: `extern "Python" int foo(int);` or + # `extern "Python" { int foo(int); }` + # output: + # void __cffi_extern_python_start; + # int foo(int); + # void __cffi_extern_python_stop; + parts = [] + while True: + match = _r_extern_python.search(csource) + if not match: + break + endpos = match.end() - 1 + #print + #print ''.join(parts)+csource + #print '=>' + parts.append(csource[:match.start()]) + parts.append('void __cffi_extern_python_start; ') + if csource[endpos] == '{': + # grouping variant + closing = csource.find('}', endpos) + if closing < 0: + raise api.CDefError("'extern \"Python\" {': no '}' found") + if csource.find('{', endpos + 1, closing) >= 0: + raise NotImplementedError("cannot use { } inside a block " + "'extern \"Python\" { ... }'") + parts.append(csource[endpos+1:closing]) + csource = csource[closing+1:] + else: + # non-grouping variant + semicolon = csource.find(';', endpos) + if semicolon < 0: + raise api.CDefError("'extern \"Python\": no ';' found") + parts.append(csource[endpos:semicolon+1]) + csource = csource[semicolon+1:] + parts.append(' void __cffi_extern_python_stop;') + #print ''.join(parts)+csource + #print + parts.append(csource) + return ''.join(parts) + def _preprocess(csource): # Remove comments. NOTE: this only work because the cdef() section # should not contain any string literal! @@ -103,8 +145,13 @@ csource = _r_stdcall2.sub(' volatile volatile const(', csource) csource = _r_stdcall1.sub(' volatile volatile const ', csource) csource = _r_cdecl.sub(' ', csource) + # + # Replace `extern "Python"` with start/end markers + csource = _preprocess_extern_python(csource) + # # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) + # # Replace "...}" with "__dotdotdotNUM__}". This construction should # occur only at the end of enums; at the end of structs we have "...;}" # and at the end of vararg functions "...);". Also replace "=...[,}]" @@ -257,6 +304,7 @@ break # try: + self._inside_extern_python = False for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) @@ -326,13 +374,19 @@ ' #define %s %s' % (key, key, key, value)) + def _declare_function(self, tp, quals, decl): + tp = self._get_type_pointer(tp, quals) + if self._inside_extern_python: + self._declare('extern_python ' + decl.name, tp) + else: + self._declare('function ' + decl.name, tp) + def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp, quals) - self._declare('function ' + decl.name, tp) + self._declare_function(tp, quals, decl) else: if isinstance(node, pycparser.c_ast.Struct): self._get_struct_union_enum_type('struct', node) @@ -348,8 +402,7 @@ tp, quals = self._get_type_and_quals(node, partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp, quals) - self._declare('function ' + decl.name, tp) + self._declare_function(tp, quals, decl) elif (tp.is_integer_type() and hasattr(decl, 'init') and hasattr(decl.init, 'value') and @@ -362,10 +415,23 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif (quals & model.Q_CONST) and not tp.is_array_type: - self._declare('constant ' + decl.name, tp, quals=quals) + elif (tp is model.void_type and + decl.name.startswith('__cffi_extern_python_')): + # hack: `extern "Python"` in the C source is replaced + # with "void __cffi_extern_python_start;" and + # "void __cffi_extern_python_stop;" + self._inside_extern_python = not self._inside_extern_python + assert self._inside_extern_python == ( + decl.name == '__cffi_extern_python_start') else: - self._declare('variable ' + decl.name, tp, quals=quals) + if self._inside_extern_python: + raise api.CDefError( + "cannot declare constants or " + "variables with 'extern \"Python\"'") + if (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) + else: + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): return self.parse_type_and_quals(cdecl)[0] diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -17,15 +17,16 @@ def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] - allsources.extend(sources) + for src in sources: + allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext): +def compile(tmpdir, ext, compiler_verbose=0): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext) + outputfilename = _build(tmpdir, ext, compiler_verbose) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -35,10 +36,10 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext): +def _build(tmpdir, ext, compiler_verbose=0): # XXX compact but horrible :-( from distutils.core import Distribution - import distutils.errors + import distutils.errors, distutils.log # dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() @@ -48,7 +49,12 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: - dist.run_command('build_ext') + old_level = distutils.log.set_threshold(0) or 0 + try: + distutils.log.set_verbosity(compiler_verbose) + dist.run_command('build_ext') + finally: + distutils.log.set_threshold(old_level) except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -1,5 +1,6 @@ -/* See doc/misc/parse_c_type.rst in the source of CFFI for more information */ +/* This part is from file 'cffi/parse_c_type.h'. It is copied at the + beginning of C sources generated by CFFI's ffi.set_source(). */ typedef void *_cffi_opcode_t; @@ -27,6 +28,7 @@ #define _CFFI_OP_DLOPEN_FUNC 35 #define _CFFI_OP_DLOPEN_CONST 37 #define _CFFI_OP_GLOBAL_VAR_F 39 +#define _CFFI_OP_EXTERN_PYTHON 41 #define _CFFI_PRIM_VOID 0 #define _CFFI_PRIM_BOOL 1 @@ -160,6 +162,12 @@ const char *error_message; }; +struct _cffi_externpy_s { + const char *name; + size_t size_of_result; + void *reserved1, *reserved2; +}; + #ifdef _CFFI_INTERNAL static int parse_c_type(struct _cffi_parse_info_s *info, const char *input); static int search_in_globals(const struct _cffi_type_context_s *ctx, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -118,6 +118,7 @@ class Recompiler: + _num_externpy = 0 def __init__(self, ffi, module_name, target_is_python=False): self.ffi = ffi @@ -356,7 +357,10 @@ else: prnt(' NULL, /* no includes */') prnt(' %d, /* num_types */' % (len(self.cffi_types),)) - prnt(' 0, /* flags */') + flags = 0 + if self._num_externpy: + flags |= 1 # set to mean that we use extern "Python" + prnt(' %d, /* flags */' % flags) prnt('};') prnt() # @@ -366,6 +370,11 @@ prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') + if self._num_externpy: + prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') + prnt(' _cffi_call_python = ' + '(void(*)(struct _cffi_externpy_s *, char *))p[1];') + prnt(' }') prnt(' p[0] = (const void *)%s;' % VERSION) prnt(' p[1] = &_cffi_type_context;') prnt('}') @@ -1108,6 +1117,75 @@ GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) # ---------- + # extern "Python" + + def _generate_cpy_extern_python_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + self._do_collect_type(tp) + + def _generate_cpy_extern_python_decl(self, tp, name): + prnt = self._prnt + if isinstance(tp.result, model.VoidType): + size_of_result = '0' + else: + context = 'result of %s' % name + size_of_result = '(int)sizeof(%s)' % ( + tp.result.get_c_name('', context),) + prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) + prnt(' { "%s", %s };' % (name, size_of_result)) + prnt() + # + arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' a%d' % i, context) + arguments.append(arg) + # + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s(%s)' % (name, repr_arguments) + # + def may_need_128_bits(tp): + return (isinstance(tp, model.PrimitiveType) and + tp.name == 'long double') + # + size_of_a = max(len(tp.args)*8, 8) + if may_need_128_bits(tp.result): + size_of_a = max(size_of_a, 16) + if isinstance(tp.result, model.StructOrUnion): + size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( + tp.result.get_c_name(''), size_of_a, + tp.result.get_c_name(''), size_of_a) + prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + prnt('{') + prnt(' char a[%s];' % size_of_a) + prnt(' char *p = a;') + for i, type in enumerate(tp.args): + arg = 'a%d' % i + if (isinstance(type, model.StructOrUnion) or + may_need_128_bits(type)): + arg = '&' + arg + type = model.PointerType(type) + prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) + prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) + if not isinstance(tp.result, model.VoidType): + prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) + prnt('}') + prnt() + self._num_externpy += 1 + + def _generate_cpy_extern_python_ctx(self, tp, name): + if self.target_is_python: + raise ffiplatform.VerificationError( + "cannot use 'extern \"Python\"' in the ABI mode") + if tp.ellipsis: + raise NotImplementedError("a vararg function is extern \"Python\"") + type_index = self._typesdict[tp] + type_op = CffiOp(OP_EXTERN_PYTHON, type_index) + self._lsts["global"].append( + GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + + # ---------- # emitting the opcodes for individual types def _emit_bytecode_VoidType(self, tp, index): @@ -1232,7 +1310,8 @@ return os.path.join(outputdir, *parts), parts def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, - c_file=None, source_extension='.c', extradir=None, **kwds): + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1252,7 +1331,7 @@ cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose) finally: os.chdir(cwd) return outputfilename diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -21,6 +21,8 @@ import math as _math import struct as _struct +_SENTINEL = object() + def _cmp(x, y): return 0 if x == y else 1 if x > y else -1 @@ -31,6 +33,8 @@ MAXYEAR = 9999 _MINYEARFMT = 1900 +_MAX_DELTA_DAYS = 999999999 + # Utility functions, adapted from Python's Demo/classes/Dates.py, which # also assumes the current Gregorian calendar indefinitely extended in # both directions. Difference: Dates.py calls January 1 of year 0 day @@ -95,6 +99,15 @@ # pasting together 25 4-year cycles. assert _DI100Y == 25 * _DI4Y - 1 +_US_PER_US = 1 +_US_PER_MS = 1000 +_US_PER_SECOND = 1000000 +_US_PER_MINUTE = 60000000 +_SECONDS_PER_DAY = 24 * 3600 +_US_PER_HOUR = 3600000000 +_US_PER_DAY = 86400000000 +_US_PER_WEEK = 604800000000 + def _ord2ymd(n): "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." @@ -271,15 +284,17 @@ def _check_int_field(value): if isinstance(value, int): - return value + return int(value) if not isinstance(value, float): try: value = value.__int__() except AttributeError: pass else: - if isinstance(value, (int, long)): - return value + if isinstance(value, int): + return int(value) + elif isinstance(value, long): + return int(long(value)) raise TypeError('__int__ method should return an integer') raise TypeError('an integer is required') raise TypeError('integer argument expected, got float') @@ -344,75 +359,79 @@ raise TypeError("can't compare '%s' to '%s'" % ( type(x).__name__, type(y).__name__)) -# This is a start at a struct tm workalike. Goals: -# -# + Works the same way across platforms. -# + Handles all the fields datetime needs handled, without 1970-2038 glitches. -# -# Note: I suspect it's best if this flavor of tm does *not* try to -# second-guess timezones or DST. Instead fold whatever adjustments you want -# into the minutes argument (and the constructor will normalize). +def _normalize_pair(hi, lo, factor): + if not 0 <= lo <= factor-1: + inc, lo = divmod(lo, factor) + hi += inc + return hi, lo -class _tmxxx: +def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False): + # Normalize all the inputs, and store the normalized values. + ss, us = _normalize_pair(ss, us, 1000000) + mm, ss = _normalize_pair(mm, ss, 60) + hh, mm = _normalize_pair(hh, mm, 60) + d, hh = _normalize_pair(d, hh, 24) + y, m, d = _normalize_date(y, m, d, ignore_overflow) + return y, m, d, hh, mm, ss, us - ordinal = None +def _normalize_date(year, month, day, ignore_overflow=False): + # That was easy. Now it gets muddy: the proper range for day + # can't be determined without knowing the correct month and year, + # but if day is, e.g., plus or minus a million, the current month + # and year values make no sense (and may also be out of bounds + # themselves). + # Saying 12 months == 1 year should be non-controversial. + if not 1 <= month <= 12: + year, month = _normalize_pair(year, month-1, 12) + month += 1 + assert 1 <= month <= 12 - def __init__(self, year, month, day, hour=0, minute=0, second=0, - microsecond=0): - # Normalize all the inputs, and store the normalized values. - if not 0 <= microsecond <= 999999: - carry, microsecond = divmod(microsecond, 1000000) - second += carry - if not 0 <= second <= 59: - carry, second = divmod(second, 60) - minute += carry - if not 0 <= minute <= 59: - carry, minute = divmod(minute, 60) - hour += carry - if not 0 <= hour <= 23: - carry, hour = divmod(hour, 24) - day += carry + # Now only day can be out of bounds (year may also be out of bounds + # for a datetime object, but we don't care about that here). + # If day is out of bounds, what to do is arguable, but at least the + # method here is principled and explainable. + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + # Move day-1 days from the first of the month. First try to + # get off cheap if we're only one day out of range (adjustments + # for timezone alone can't be worse than that). + if day == 0: # move back a day + month -= 1 + if month > 0: + day = _days_in_month(year, month) + else: + year, month, day = year-1, 12, 31 + elif day == dim + 1: # move forward a day + month += 1 + day = 1 + if month > 12: + month = 1 + year += 1 + else: + ordinal = _ymd2ord(year, month, 1) + (day - 1) + year, month, day = _ord2ymd(ordinal) - # That was easy. Now it gets muddy: the proper range for day - # can't be determined without knowing the correct month and year, - # but if day is, e.g., plus or minus a million, the current month - # and year values make no sense (and may also be out of bounds - # themselves). - # Saying 12 months == 1 year should be non-controversial. - if not 1 <= month <= 12: - carry, month = divmod(month-1, 12) - year += carry - month += 1 - assert 1 <= month <= 12 + if not ignore_overflow and not MINYEAR <= year <= MAXYEAR: + raise OverflowError("date value out of range") + return year, month, day - # Now only day can be out of bounds (year may also be out of bounds - # for a datetime object, but we don't care about that here). - # If day is out of bounds, what to do is arguable, but at least the - # method here is principled and explainable. - dim = _days_in_month(year, month) - if not 1 <= day <= dim: - # Move day-1 days from the first of the month. First try to - # get off cheap if we're only one day out of range (adjustments - # for timezone alone can't be worse than that). - if day == 0: # move back a day - month -= 1 - if month > 0: - day = _days_in_month(year, month) - else: - year, month, day = year-1, 12, 31 - elif day == dim + 1: # move forward a day - month += 1 - day = 1 - if month > 12: - month = 1 - year += 1 - else: - self.ordinal = _ymd2ord(year, month, 1) + (day - 1) - year, month, day = _ord2ymd(self.ordinal) - - self.year, self.month, self.day = year, month, day - self.hour, self.minute, self.second = hour, minute, second - self.microsecond = microsecond +def _accum(tag, sofar, num, factor, leftover): + if isinstance(num, (int, long)): + prod = num * factor + rsum = sofar + prod + return rsum, leftover + if isinstance(num, float): + fracpart, intpart = _math.modf(num) + prod = int(intpart) * factor + rsum = sofar + prod + if fracpart == 0.0: + return rsum, leftover + assert isinstance(factor, (int, long)) + fracpart, intpart = _math.modf(factor * fracpart) + rsum += int(intpart) + return rsum, leftover + fracpart + raise TypeError("unsupported type for timedelta %s component: %s" % + (tag, type(num))) class timedelta(object): """Represent the difference between two datetime objects. @@ -433,100 +452,42 @@ """ __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' - def __new__(cls, days=0, seconds=0, microseconds=0, - milliseconds=0, minutes=0, hours=0, weeks=0): - # Doing this efficiently and accurately in C is going to be difficult - # and error-prone, due to ubiquitous overflow possibilities, and that - # C double doesn't have enough bits of precision to represent - # microseconds over 10K years faithfully. The code here tries to make - # explicit where go-fast assumptions can be relied on, in order to - # guide the C implementation; it's way more convoluted than speed- - # ignoring auto-overflow-to-long idiomatic Python could be. + def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL, + milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL): + x = 0 + leftover = 0.0 + if microseconds is not _SENTINEL: + x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover) + if milliseconds is not _SENTINEL: + x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover) + if seconds is not _SENTINEL: + x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover) + if minutes is not _SENTINEL: + x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover) + if hours is not _SENTINEL: + x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover) + if days is not _SENTINEL: + x, leftover = _accum("days", x, days, _US_PER_DAY, leftover) + if weeks is not _SENTINEL: + x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover) + if leftover != 0.0: + x += _round(leftover) + return cls._from_microseconds(x) - # XXX Check that all inputs are ints, longs or floats. + @classmethod + def _from_microseconds(cls, us): + s, us = divmod(us, _US_PER_SECOND) + d, s = divmod(s, _SECONDS_PER_DAY) + return cls._create(d, s, us, False) - # Final values, all integer. - # s and us fit in 32-bit signed ints; d isn't bounded. - d = s = us = 0 + @classmethod + def _create(cls, d, s, us, normalize): + if normalize: + s, us = _normalize_pair(s, us, 1000000) + d, s = _normalize_pair(d, s, 24*3600) - # Normalize everything to days, seconds, microseconds. - days += weeks*7 - seconds += minutes*60 + hours*3600 - microseconds += milliseconds*1000 - - # Get rid of all fractions, and normalize s and us. - # Take a deep breath . - if isinstance(days, float): - dayfrac, days = _math.modf(days) - daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) - assert daysecondswhole == int(daysecondswhole) # can't overflow - s = int(daysecondswhole) - assert days == int(days) - d = int(days) - else: - daysecondsfrac = 0.0 - d = days - assert isinstance(daysecondsfrac, float) - assert abs(daysecondsfrac) <= 1.0 - assert isinstance(d, (int, long)) - assert abs(s) <= 24 * 3600 - # days isn't referenced again before redefinition - - if isinstance(seconds, float): - secondsfrac, seconds = _math.modf(seconds) - assert seconds == int(seconds) - seconds = int(seconds) - secondsfrac += daysecondsfrac - assert abs(secondsfrac) <= 2.0 - else: - secondsfrac = daysecondsfrac - # daysecondsfrac isn't referenced again - assert isinstance(secondsfrac, float) - assert abs(secondsfrac) <= 2.0 - - assert isinstance(seconds, (int, long)) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) # can't overflow - assert isinstance(s, int) - assert abs(s) <= 2 * 24 * 3600 - # seconds isn't referenced again before redefinition - - usdouble = secondsfrac * 1e6 - assert abs(usdouble) < 2.1e6 # exact value not critical - # secondsfrac isn't referenced again - - if isinstance(microseconds, float): - microseconds = _round(microseconds + usdouble) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) - microseconds = int(microseconds) - else: - microseconds = int(microseconds) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) - microseconds = _round(microseconds + usdouble) - assert isinstance(s, int) - assert isinstance(microseconds, int) - assert abs(s) <= 3 * 24 * 3600 - assert abs(microseconds) < 3.1e6 - - # Just a little bit of carrying possible for microseconds and seconds. - seconds, us = divmod(microseconds, 1000000) - s += seconds - days, s = divmod(s, 24*3600) - d += days - - assert isinstance(d, (int, long)) - assert isinstance(s, int) and 0 <= s < 24*3600 - assert isinstance(us, int) and 0 <= us < 1000000 - - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) + if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS: + raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS)) self = object.__new__(cls) self._days = d @@ -535,6 +496,10 @@ self._hashcode = -1 return self + def _to_microseconds(self): + return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND + + self._microseconds) + def __repr__(self): module = "datetime." if self.__class__ is timedelta else "" if self._microseconds: @@ -562,8 +527,7 @@ def total_seconds(self): """Total seconds in the duration.""" - return ((self.days * 86400 + self.seconds) * 10**6 + - self.microseconds) / 10**6 + return self._to_microseconds() / 10**6 # Read-only field accessors @property @@ -585,36 +549,37 @@ if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(self._days + other._days, - self._seconds + other._seconds, - self._microseconds + other._microseconds) + return timedelta._create(self._days + other._days, + self._seconds + other._seconds, + self._microseconds + other._microseconds, + True) return NotImplemented - __radd__ = __add__ - def __sub__(self, other): if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(self._days - other._days, - self._seconds - other._seconds, - self._microseconds - other._microseconds) - return NotImplemented - - def __rsub__(self, other): - if isinstance(other, timedelta): - return -self + other + return timedelta._create(self._days - other._days, + self._seconds - other._seconds, + self._microseconds - other._microseconds, + True) return NotImplemented def __neg__(self): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(-self._days, - -self._seconds, - -self._microseconds) + return timedelta._create(-self._days, + -self._seconds, + -self._microseconds, + True) def __pos__(self): - return self + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta._create(self._days, + self._seconds, + self._microseconds, + False) def __abs__(self): if self._days < 0: @@ -623,25 +588,18 @@ return self def __mul__(self, other): - if isinstance(other, (int, long)): - # for CPython compatibility, we cannot use - # our __class__ here, but need a real timedelta - return timedelta(self._days * other, - self._seconds * other, - self._microseconds * other) - return NotImplemented + if not isinstance(other, (int, long)): + return NotImplemented + usec = self._to_microseconds() + return timedelta._from_microseconds(usec * other) __rmul__ = __mul__ - def _to_microseconds(self): - return ((self._days * (24*3600) + self._seconds) * 1000000 + - self._microseconds) - def __div__(self, other): if not isinstance(other, (int, long)): return NotImplemented usec = self._to_microseconds() - return timedelta(0, 0, usec // other) + return timedelta._from_microseconds(usec // other) __floordiv__ = __div__ @@ -705,9 +663,8 @@ def __reduce__(self): return (self.__class__, self._getstate()) -timedelta.min = timedelta(-999999999) -timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, - microseconds=999999) +timedelta.min = timedelta(-_MAX_DELTA_DAYS) +timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1) timedelta.resolution = timedelta(microseconds=1) class date(object): @@ -948,32 +905,29 @@ # Computations - def _checkOverflow(self, year): - if not MINYEAR <= year <= MAXYEAR: - raise OverflowError("date +/-: result year %d not in %d..%d" % - (year, MINYEAR, MAXYEAR)) + def _add_timedelta(self, other, factor): + y, m, d = _normalize_date( + self._year, + self._month, + self._day + other.days * factor) + return date(y, m, d) def __add__(self, other): "Add a date to a timedelta." if isinstance(other, timedelta): - t = _tmxxx(self._year, - self._month, - self._day + other.days) - self._checkOverflow(t.year) - result = date(t.year, t.month, t.day) - return result + return self._add_timedelta(other, 1) return NotImplemented __radd__ = __add__ def __sub__(self, other): """Subtract two dates, or a date and a timedelta.""" - if isinstance(other, timedelta): - return self + timedelta(-other.days) if isinstance(other, date): days1 = self.toordinal() days2 = other.toordinal() - return timedelta(days1 - days2) + return timedelta._create(days1 - days2, 0, 0, False) + if isinstance(other, timedelta): + return self._add_timedelta(other, -1) return NotImplemented def weekday(self): @@ -1340,7 +1294,7 @@ offset = self._tzinfo.utcoffset(None) offset = _check_utc_offset("utcoffset", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1378,7 +1332,7 @@ offset = self._tzinfo.dst(None) offset = _check_utc_offset("dst", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1505,18 +1459,24 @@ A timezone info object may be passed in as well. """ + _check_tzinfo_arg(tz) + converter = _time.localtime if tz is None else _time.gmtime + self = cls._from_timestamp(converter, timestamp, tz) + if tz is not None: + self = tz.fromutc(self) + return self - _check_tzinfo_arg(tz) + @classmethod + def utcfromtimestamp(cls, t): + "Construct a UTC datetime from a POSIX timestamp (like time.time())." + return cls._from_timestamp(_time.gmtime, t, None) - converter = _time.localtime if tz is None else _time.gmtime - - if isinstance(timestamp, int): - us = 0 - else: - t_full = timestamp - timestamp = int(_math.floor(timestamp)) - frac = t_full - timestamp - us = _round(frac * 1e6) + @classmethod + def _from_timestamp(cls, converter, timestamp, tzinfo): + t_full = timestamp + timestamp = int(_math.floor(timestamp)) + frac = t_full - timestamp + us = _round(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, @@ -1527,32 +1487,7 @@ us = 0 y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp) ss = min(ss, 59) # clamp out leap seconds if the platform has them - result = cls(y, m, d, hh, mm, ss, us, tz) - if tz is not None: - result = tz.fromutc(result) - return result - - @classmethod - def utcfromtimestamp(cls, t): - "Construct a UTC datetime from a POSIX timestamp (like time.time())." - if isinstance(t, int): - us = 0 - else: - t_full = t - t = int(_math.floor(t)) - frac = t_full - t - us = _round(frac * 1e6) - - # If timestamp is less than one microsecond smaller than a - # full second, us can be rounded up to 1000000. In this case, - # roll over to seconds, otherwise, ValueError is raised - # by the constructor. - if us == 1000000: - t += 1 - us = 0 - y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t) - ss = min(ss, 59) # clamp out leap seconds if the platform has them - return cls(y, m, d, hh, mm, ss, us) + return cls(y, m, d, hh, mm, ss, us, tzinfo) @classmethod def now(cls, tz=None): @@ -1594,9 +1529,9 @@ hh, mm, ss = self.hour, self.minute, self.second offset = self._utcoffset() if offset: # neither None nor 0 - tm = _tmxxx(y, m, d, hh, mm - offset) - y, m, d = tm.year, tm.month, tm.day - hh, mm = tm.hour, tm.minute + mm -= offset + y, m, d, hh, mm, ss, _ = _normalize_datetime( + y, m, d, hh, mm, ss, 0, ignore_overflow=True) return _build_struct_time(y, m, d, hh, mm, ss, 0) def date(self): @@ -1730,7 +1665,7 @@ offset = self._tzinfo.utcoffset(self) offset = _check_utc_offset("utcoffset", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1768,7 +1703,7 @@ offset = self._tzinfo.dst(self) offset = _check_utc_offset("dst", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1859,22 +1794,22 @@ return -1 return diff and 1 or 0 + def _add_timedelta(self, other, factor): + y, m, d, hh, mm, ss, us = _normalize_datetime( + self._year, + self._month, + self._day + other.days * factor, + self._hour, + self._minute, + self._second + other.seconds * factor, + self._microsecond + other.microseconds * factor) + return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo) + def __add__(self, other): "Add a datetime and a timedelta." if not isinstance(other, timedelta): return NotImplemented - t = _tmxxx(self._year, - self._month, - self._day + other.days, - self._hour, - self._minute, - self._second + other.seconds, - self._microsecond + other.microseconds) - self._checkOverflow(t.year) - result = datetime(t.year, t.month, t.day, - t.hour, t.minute, t.second, - t.microsecond, tzinfo=self._tzinfo) - return result + return self._add_timedelta(other, 1) __radd__ = __add__ @@ -1882,16 +1817,15 @@ "Subtract two datetimes, or a datetime and a timedelta." if not isinstance(other, datetime): if isinstance(other, timedelta): - return self + -other + return self._add_timedelta(other, -1) return NotImplemented - days1 = self.toordinal() - days2 = other.toordinal() - secs1 = self._second + self._minute * 60 + self._hour * 3600 - secs2 = other._second + other._minute * 60 + other._hour * 3600 - base = timedelta(days1 - days2, - secs1 - secs2, - self._microsecond - other._microsecond) + delta_d = self.toordinal() - other.toordinal() + delta_s = (self._hour - other._hour) * 3600 + \ + (self._minute - other._minute) * 60 + \ + (self._second - other._second) + delta_us = self._microsecond - other._microsecond + base = timedelta._create(delta_d, delta_s, delta_us, True) if self._tzinfo is other._tzinfo: return base myoff = self._utcoffset() diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -88,9 +88,19 @@ # try: unbound_method = getattr(_continulet, methodname) + _tls.leaving = current args, kwds = unbound_method(current, *baseargs, to=target) - finally: _tls.current = current + except: + _tls.current = current + if hasattr(_tls, 'trace'): + _run_trace_callback('throw') + _tls.leaving = None + raise + else: + if hasattr(_tls, 'trace'): + _run_trace_callback('switch') + _tls.leaving = None # if kwds: if args: @@ -122,6 +132,34 @@ return f.f_back.f_back.f_back # go past start(), __switch(), switch() # ____________________________________________________________ +# Recent additions + +GREENLET_USE_GC = True +GREENLET_USE_TRACING = True + +def gettrace(): + return getattr(_tls, 'trace', None) + +def settrace(callback): + try: + prev = _tls.trace + del _tls.trace + except AttributeError: + prev = None + if callback is not None: + _tls.trace = callback + return prev + +def _run_trace_callback(event): + try: + _tls.trace(event, (_tls.leaving, _tls.current)) + except: + # In case of exceptions trace function is removed + if hasattr(_tls, 'trace'): + del _tls.trace + raise + +# ____________________________________________________________ # Internal stuff try: @@ -143,22 +181,32 @@ _tls.current = gmain def _greenlet_start(greenlet, args): - args, kwds = args - _tls.current = greenlet try: - res = greenlet.run(*args, **kwds) - except GreenletExit, e: - res = e + args, kwds = args + _tls.current = greenlet + try: + if hasattr(_tls, 'trace'): + _run_trace_callback('switch') + res = greenlet.run(*args, **kwds) + except GreenletExit, e: + res = e + finally: + _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) finally: - _continuation.permute(greenlet, greenlet.parent) - return ((res,), None) + _tls.leaving = greenlet def _greenlet_throw(greenlet, exc, value, tb): - _tls.current = greenlet try: - raise exc, value, tb - except GreenletExit, e: - res = e + _tls.current = greenlet + try: + if hasattr(_tls, 'trace'): + _run_trace_callback('throw') + raise exc, value, tb + except GreenletExit, e: + res = e + finally: + _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) finally: - _continuation.permute(greenlet, greenlet.parent) - return ((res,), None) + _tls.leaving = greenlet diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -73,28 +73,36 @@ lzma (PyPy3 only) liblzma -sqlite3 - libsqlite3 - -curses - libncurses + cffi dependencies from above - pyexpat libexpat1 _ssl libssl +Make sure to have these libraries (with development headers) installed +before building PyPy, otherwise the resulting binary will not contain +these modules. Furthermore, the following libraries should be present +after building PyPy, otherwise the corresponding CFFI modules are not +built (you can run or re-run `pypy/tool/release/package.py` to retry +to build them; you don't need to re-translate the whole PyPy): + +sqlite3 + libsqlite3 + +curses + libncurses + gdbm libgdbm-dev -Make sure to have these libraries (with development headers) installed before -building PyPy, otherwise the resulting binary will not contain these modules. +tk + tk-dev On Debian, this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev + libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ + tk-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. @@ -102,6 +110,7 @@ yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel + (XXX plus the Febora version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -110,6 +119,7 @@ zypper install gcc make python-devel pkg-config \ zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ libexpat-devel libffi-devel python-curses + (XXX plus the SLES11 version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -125,11 +135,13 @@ Translate with JIT:: - pypy rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=jit Translate without JIT:: - pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=2 (You can use ``python`` instead of ``pypy`` here, which will take longer but works too.) @@ -138,8 +150,16 @@ current directory. The executable behaves mostly like a normal Python interpreter (see :doc:`cpython_differences`). +Build cffi import libraries for the stdlib +------------------------------------------ -.. _translate-pypy: +Various stdlib modules require a separate build step to create the cffi +import libraries in the `out-of-line API mode`_. This is done by the following +command:: + + PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py + +.. _`out-of-line API mode`: http://cffi.readthedocs.org/en/latest/overview.html#real-example-api-level-out-of-line Translating with non-standard options ------------------------------------- @@ -199,4 +219,3 @@ that this is never the case. -.. TODO windows diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -130,8 +130,13 @@ More complete example --------------------- -.. note:: This example depends on pypy_execute_source_ptr which is not available - in PyPy <= 2.2.1. +.. note:: Note that we do not make use of ``extern "Python"``, the new + way to do callbacks in CFFI 1.4: this is because these examples use + the ABI mode, not the API mode, and with the ABI mode you still have + to use ``ffi.callback()``. It is work in progress to integrate + ``extern "Python"`` with the idea of embedding (and it is expected + to ultimately lead to a better way to do embedding than the one + described here, and that would work equally well on CPython and PyPy). Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -83,28 +83,27 @@ **pypy-stm requires 64-bit Linux for now.** -Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu binary here__ -(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04). The current version -supports four "segments", which means that it will run up to four -threads in parallel. (Development recently switched to `stmgc-c8`_, -but that is not ready for trying out yet.) +Development is done in the branch `stmgc-c8`_. If you are only +interested in trying it out, please pester us until we upload a recent +prebuilt binary. The current version supports four "segments", which +means that it will run up to four threads in parallel. To build a version from sources, you first need to compile a custom -version of clang(!); we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 `` -for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for a clang-only feature that hasn't been used so heavily -in the past (without the patches, you get crashes of clang). Then get -the branch `stmgc-c7`_ of PyPy and run:: +version of gcc(!). See the instructions here: +https://bitbucket.org/pypy/stmgc/src/default/gcc-seg-gs/ +(Note that these patches are being incorporated into gcc. It is likely +that future versions of gcc will not need to be patched any more.) - rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py +Then get the branch `stmgc-c8`_ of PyPy and run:: -.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ + cd pypy/goal + ../../rpython/bin/rpython -Ojit --stm + +At the end, this will try to compile the generated C code by calling +``gcc-seg-gs``, which must be the script you installed in the +instructions above. + .. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/ -.. __: https://bitbucket.org/pypy/pypy/downloads/ -.. __: http://clang.llvm.org/get_started.html -.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ .. _caveats: @@ -112,6 +111,12 @@ Current status (stmgc-c7) ------------------------- +.. warning:: + + THIS PAGE IS OLD, THE REST IS ABOUT STMGC-C7 WHEREAS THE CURRENT + DEVELOPMENT WORK IS DONE ON STMGC-C8 + + * **NEW:** It seems to work fine, without crashing any more. Please `report any crash`_ you find (or other bugs). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,7 +1,112 @@ ========================= -What's new in PyPy 4.0.+ +What's new in PyPy 4.1.+ ========================= .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 +Fixed ``_PyLong_FromByteArray()``, which was buggy. + +.. branch: numpy-1.10 + +Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy +which is now 1.10.2 + +.. branch: osx-flat-namespace + +Fix the cpyext tests on OSX by linking with -flat_namespace + +.. branch: anntype + +Refactor and improve exception analysis in the annotator. + +.. branch: posita/2193-datetime-timedelta-integrals + +Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` +to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) + +.. branch: faster-rstruct + +Improve the performace of struct.unpack, which now directly reads inside the +string buffer and directly casts the bytes to the appropriate type, when +allowed. Unpacking of floats and doubles is about 15 times faster now, while +for integer types it's up to ~50% faster for 64bit integers. + +.. branch: wrap-specialisation + +Remove unnecessary special handling of space.wrap(). + +.. branch: compress-numbering + +Improve the memory signature of numbering instances in the JIT. + +.. branch: fix-trace-too-long-heuristic From pypy.commits at gmail.com Mon Jan 11 16:21:09 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Mon, 11 Jan 2016 13:21:09 -0800 (PST) Subject: [pypy-commit] pypy repeatlist_strategy: Initial commit Message-ID: <56941cc5.17941c0a.b2ce6.ffffe06c@mx.google.com> Author: Vincent Legoll Branch: repeatlist_strategy Changeset: r81677:856e309b020f Date: 2016-01-11 21:55 +0100 http://bitbucket.org/pypy/pypy/changeset/856e309b020f/ Log: Initial commit - some tests still fail because mul() is not allowed to call clone() diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -35,11 +35,17 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index, negate -__all__ = ['W_ListObject', 'make_range_list', 'make_empty_list_with_size'] +__all__ = ['W_ListObject', "make_repeat_list", 'make_range_list', 'make_empty_list_with_size'] UNROLL_CUTOFF = 5 +def make_repeat_list(space, w_item, length): + if length <= 0: + return make_empty_list(space) + strategy = space.fromcache(RepeatListStrategy) + storage = strategy.erase((w_item, length)) + return W_ListObject.from_storage_and_strategy(space, storage, strategy) def make_range_list(space, start, step, length): if length <= 0: @@ -961,7 +967,7 @@ def setslice(self, w_list, start, step, slicelength, w_other): strategy = w_other.strategy - storage = strategy.getstorage_copy(w_other) + storage = strategy.getstorage_copy(w_other) # XXX Why not strategy.copy_into(w_other, w_list) ? w_list.strategy = strategy w_list.lstorage = storage @@ -1031,6 +1037,224 @@ self.sizehint = hint +class RepeatListStrategy(ListStrategy): + """RepeatListStrategy is used when a list is created using the multiplication + operator on iterables. The storage is a two elements tuple, with the + repeated item, and a positive integer storing the length.""" + + erase, unerase = rerased.new_erasing_pair("repeat") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def unwrap(self, w_obj): + return w_obj + + def wrap(self, item): + return item + + def clone(self, w_list): + storage = w_list.lstorage # lstorage is tuple, no need to clone + w_clone = W_ListObject.from_storage_and_strategy(self.space, storage, + self) + return w_clone + + def copy_into(self, w_list, w_other): + w_other.strategy = self + w_other.lstorage = w_list.lstorage + + def _resize_hint(self, w_list, hint): + raise NotImplementedError + + def find(self, w_list, w_item, start, stop): + w_l_item, length = self.unerase(w_list.lstorage) + if length > 0 and start < length and self.space.eq_w(w_l_item, w_item): + return 0 + raise ValueError + + def switch_to_item_strategy(self, w_list, w_item=None): + w_l_item, length = self.unerase(w_list.lstorage) + w_item_type = type(w_l_item) + if w_item is not None and type(w_item) is w_item_type: + if w_item_type is W_IntObject: + strategy = self.space.fromcache(IntegerListStrategy) + item_w = strategy.unwrap(w_l_item) + lstorage = [item_w] * length + w_list.lstorage = strategy.erase(lstorage) + w_list.strategy = strategy + return + elif w_item_type is W_BytesObject: + strategy = self.space.fromcache(BytesListStrategy) + item_w = strategy.unwrap(w_l_item) + lstorage = [item_w] * length + w_list.lstorage = strategy.erase(lstorage) + w_list.strategy = strategy + return + elif w_item_type is W_UnicodeObject: + strategy = self.space.fromcache(UnicodeListStrategy) + item_w = strategy.unwrap(w_l_item) + lstorage = [item_w] * length + w_list.lstorage = strategy.erase(lstorage) + w_list.strategy = strategy + return + elif w_item_type is W_FloatObject: + strategy = self.space.fromcache(FloatListStrategy) + item_w = strategy.unwrap(w_l_item) + lstorage = [item_w] * length + w_list.lstorage = strategy.erase(lstorage) # XXX: use init_from_list_w too ? or define init_from_list_unw() ? + w_list.strategy = strategy + return + # Fall back to ObjectListStrategy + strategy = self.space.fromcache(ObjectListStrategy) + w_list.strategy = strategy + items = [w_l_item] * length + strategy.init_from_list_w(w_list, items) + + def append(self, w_list, w_item): + w_l_item, length = self.unerase(w_list.lstorage) + if type(w_item) is type(w_l_item): + if self.space.eq_w(w_item, w_l_item): + w_list.lstorage = self.erase((w_l_item, length + 1)) + return + self.switch_to_item_strategy(w_list, w_item) + w_list.strategy.append(w_list, w_item) + + def length(self, w_list): + return self.unerase(w_list.lstorage)[1] + + def getslice(self, w_list, start, stop, step, another_length): + w_l_item, length = self.unerase(w_list.lstorage) + if step > 0 and start < stop: + new_length = (stop - start) + if step != 1: + pad = 0 + if new_length % step: + pad = 1 + new_length = pad + new_length / step + elif step < 0 and stop < start: + new_length = (start - stop) + if step != -1: + pad = 0 + if new_length % -step: + pad = 1 + new_length = pad + new_length / -step + elif step == 0: + raise NotImplementedError + else: + return make_empty_list(self.space) + storage = self.erase((w_l_item, new_length)) + return W_ListObject.from_storage_and_strategy( + self.space, storage, self) + + def getitem(self, w_list, index): + if index < self.length(w_list): + return self.unerase(w_list.lstorage)[0] + raise IndexError + + def getstorage_copy(self, w_list): + # tuple is immutable + return w_list.lstorage + + def getitems(self, w_list): + return self.getitems_copy(w_list) + + @jit.look_inside_iff(lambda self, w_list: + jit.loop_unrolling_heuristic(w_list, w_list.length(), + UNROLL_CUTOFF)) + def getitems_copy(self, w_list): + w_l_item, length = self.unerase(w_list.lstorage) + return [w_l_item] * length + + @jit.unroll_safe + def getitems_unroll(self, w_list): + w_l_item, length = self.unerase(w_list.lstorage) + return [w_l_item] * length + + @jit.look_inside_iff(lambda self, w_list: + jit.loop_unrolling_heuristic(w_list, w_list.length(), + UNROLL_CUTOFF)) + def getitems_fixedsize(self, w_list): + return self.getitems_unroll(w_list) + + def pop_end(self, w_list): + w_l_item, length = self.unerase(w_list.lstorage) + if length != 0: + w_list.lstorage = self.erase((w_l_item, length - 1)) + return w_l_item + raise IndexError + + def pop(self, w_list, index): + w_l_item, length = self.unerase(w_list.lstorage) + if -length <= index < length: + w_list.lstorage = self.erase((w_l_item, length - 1)) + return w_l_item + raise IndexError + + def _extend_from_list(self, w_list, w_other): + if w_other.length() == 0: + return + if w_other.strategy is self.space.fromcache(RepeatListStrategy): + w_l_item, l_length = self.unerase(w_list.lstorage) + w_o_item, o_length = self.unerase(w_other.lstorage) + if self.space.eq_w(w_l_item, w_o_item): + w_list.lstorage = self.erase((w_l_item, l_length + o_length)) + return + self.switch_to_item_strategy(w_list) + w_list.extend(w_other) + + def inplace_mul(self, w_list, times): + if times > 0: + w_l_item, length = self.unerase(w_list.lstorage) + w_list.lstorage = self.erase((w_l_item, length * times)) + else: + strategy = self.space.fromcache(EmptyListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase(None) + + def reverse(self, w_list): + return + + def sort(self, w_list, reverse): + return + + def insert(self, w_list, index, w_item): + w_l_item, length = self.unerase(w_list.lstorage) + if index >= length: + raise IndexError + if self.space.eq_w(w_item, w_l_item): + w_list.lstorage = self.erase((w_l_item, length + 1)) + return + self.switch_to_item_strategy(w_list, w_item) + w_list.insert(index, w_item) + + def setitem(self, w_list, index, w_item): + w_l_item, length = self.unerase(w_list.lstorage) + if index >= length: + raise IndexError + if self.space.eq_w(w_item, w_l_item): + return + self.switch_to_item_strategy(w_list, w_item) + w_list.setitem(index, w_item) + + def setslice(self, w_list, start, step, slicelength, w_other): + assert slicelength >= 0 + if w_other.length() == 0: + self.deleteslice(w_list, start, step, slicelength) + return + if w_other.strategy is self.space.fromcache(RepeatListStrategy): + w_l_item, l_length = self.unerase(w_list.lstorage) + w_o_item, o_length = self.unerase(w_other.lstorage) + if self.space.eq_w(w_l_item, w_o_item): + w_list.lstorage = self.erase((w_l_item, l_length + o_length - slicelength)) + return + self.switch_to_item_strategy(w_list) + w_list.setslice(start, step, slicelength, w_other) + + def deleteslice(self, w_list, start, step, slicelength): + if slicelength == 0: + return + w_l_item, length = self.unerase(w_list.lstorage) + w_list.lstorage = self.erase((w_l_item, length - slicelength)) + class BaseRangeListStrategy(ListStrategy): def switch_to_integer_strategy(self, w_list): items = self._getitems_range(w_list, False) @@ -1570,11 +1794,80 @@ return w_item def mul(self, w_list, times): + assert w_list.strategy is not self.space.fromcache(RepeatListStrategy) + if times <= 0: + return make_empty_list(self.space) + if w_list.length() == 1: + w_l_ret = w_list.clone() + if self.switch_to_repeat_strategy_inplace(w_l_ret, times): + return w_l_ret l = self.unerase(w_list.lstorage) return W_ListObject.from_storage_and_strategy( self.space, self.erase(l * times), self) + def switch_to_repeat_strategy_inplace(self, w_list, times): + if w_list.strategy is self.space.fromcache(ObjectListStrategy): + # Already wrapped + w_item = w_list.getitem(0) + strategy = self.space.fromcache(RepeatListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase((w_item, times)) + return True + if w_list.strategy is self.space.fromcache(IntegerListStrategy): + w_item = w_list.getitem(0) + assert isinstance(w_item, W_IntObject) + strategy = self.space.fromcache(RepeatListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase((w_item, times)) + return True + if w_list.strategy is self.space.fromcache(FloatListStrategy): + w_item = w_list.getitem(0) + assert isinstance(w_item, W_FloatObject) + strategy = self.space.fromcache(RepeatListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase((w_item, times)) + return True + if w_list.strategy is self.space.fromcache(IntOrFloatListStrategy): + w_item = w_list.getitem(0) + assert isinstance(w_item, W_FloatObject) or isinstance(w_item, W_IntObject) + strategy = self.space.fromcache(RepeatListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase((w_item, times)) + return True + if w_list.strategy is self.space.fromcache(UnicodeListStrategy): + w_item = w_list.getitem(0) + assert isinstance(w_item, W_UnicodeObject) + strategy = self.space.fromcache(RepeatListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase((w_item, times)) + return True + if w_list.strategy is self.space.fromcache(BytesListStrategy): + w_item = w_list.getitem(0) + assert isinstance(w_item, W_BytesObject) + strategy = self.space.fromcache(RepeatListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase((w_item, times)) + return True + if w_list.strategy is self.space.fromcache(BaseRangeListStrategy): + w_item = w_list.getitem(0) + assert isinstance(w_item, W_IntObject) + strategy = self.space.fromcache(RepeatListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase((w_item, times)) + return True + return False + def inplace_mul(self, w_list, times): + assert w_list.strategy is not self.space.fromcache(RepeatListStrategy) + if times <= 0: + #XXX switch_to_empty() or clear() + strategy = self.space.fromcache(EmptyListStrategy) + w_list.strategy = strategy + w_list.lstorage = strategy.erase(None) + return + if w_list.length() == 1: + if self.switch_to_repeat_strategy_inplace(w_list, times): + return l = self.unerase(w_list.lstorage) l *= times diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -4,7 +4,7 @@ W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, SimpleRangeListStrategy, make_range_list, UnicodeListStrategy, - IntOrFloatListStrategy) + IntOrFloatListStrategy, make_repeat_list, RepeatListStrategy) from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -588,6 +588,397 @@ l3 = l1.descr_add(self.space, l2) assert self.space.eq_w(l3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3), self.space.wrap(4), self.space.wrap(5)])) + def test_repeatlist_from_other_strategy(self): + l1 = W_ListObject(self.space, [self.space.wrap(42)]) + assert l1.strategy == self.space.fromcache(IntegerListStrategy) + l2 = l1.mul(3) + assert l1.strategy == self.space.fromcache(IntegerListStrategy) + assert l1.length() == 1 + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(42)) + assert l2.strategy == self.space.fromcache(RepeatListStrategy) + assert l2.length() == 3 + assert self.space.eq_w(l2.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(42)) + + def test_repeatlist_from_other_strategy_imul(self): + l1 = W_ListObject(self.space, [self.space.wrap(42)]) + assert l1.strategy == self.space.fromcache(IntegerListStrategy) + l1.inplace_mul(3) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 3 + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(42)) + + def test_repeatlist_from_other_strategy_imul_neg(self): + l1 = W_ListObject(self.space, [self.space.wrap(42)]) + assert l1.strategy == self.space.fromcache(IntegerListStrategy) + l1.inplace_mul(-3) + assert l1.strategy == self.space.fromcache(EmptyListStrategy) + assert l1.length() == 0 + + def test_repeatlist_add(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 2) + l2 = make_repeat_list(self.space, self.space.wrap(None), 1) + l3 = l1.descr_add(self.space, l2) + assert self.space.eq_w(l3, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + assert l3.strategy == self.space.fromcache(RepeatListStrategy) + l2 = W_ListObject(self.space, [self.space.wrap(None)]) + l3 = l1.descr_add(self.space, l2) + assert self.space.eq_w(l3, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + assert l3.strategy == self.space.fromcache(ObjectListStrategy) + + def test_repeatlist_deleteslice(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 42) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 42 + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + l1.deleteslice(0, 1, 0) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 42 + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + l1.deleteslice(0, 1, 12) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 30 + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + l1.deleteslice(0, 1, 30) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 0 + + def test_repeatlist_empty_getitem(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 1) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 1 + l1.pop_end() + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 0 + from pypy.interpreter.error import OperationError + try: + l1.descr_getitem(self.space, self.space.wrap(0)) + except OperationError as e: + if not e.match(self.space, self.space.w_IndexError): + raise + else: + assert False, "Did not raise IndexError" + + def test_repeatlist_setitem(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 3) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 3 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + l1.setitem(1, self.space.wrap(None)) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 3 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + l1.setitem(1, self.space.wrap(42)) + assert l1.strategy == self.space.fromcache(ObjectListStrategy) + assert l1.length() == 3 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(42), self.space.wrap(None)])) + + l1 = make_repeat_list(self.space, self.space.wrap(42), 3) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 3 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(42), self.space.wrap(42), self.space.wrap(42)])) + l1.setitem(1, self.space.wrap(21)) + assert l1.strategy == self.space.fromcache(IntegerListStrategy) + assert l1.length() == 3 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(42), self.space.wrap(21), self.space.wrap(42)])) + + l1 = make_repeat_list(self.space, self.space.wrap(42), 3) + from pypy.interpreter.error import OperationError + try: + l1.setitem(10, self.space.wrap(42)) + except IndexError: + pass + else: + assert False, "Did not raise IndexError" + + def test_repeatlist_insert(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 2) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 2 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None)])) + l1.insert(0, self.space.wrap(None)) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 3 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + l1.insert(1, self.space.wrap(42)) + assert l1.strategy == self.space.fromcache(ObjectListStrategy) + assert l1.length() == 4 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(42), self.space.wrap(None), self.space.wrap(None)])) + + l1 = make_repeat_list(self.space, self.space.wrap(42), 2) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 2 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(42), self.space.wrap(42)])) + l1.insert(0, self.space.wrap(21)) + assert l1.strategy == self.space.fromcache(IntegerListStrategy) + assert l1.length() == 3 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(21), self.space.wrap(42), self.space.wrap(42)])) + + l1 = make_repeat_list(self.space, self.space.wrap(42), 3) + from pypy.interpreter.error import OperationError + try: + l1.insert(10, self.space.wrap(42)) + except IndexError: + pass + else: + assert False, "Did not raise IndexError" + + def test_repeatlist_reverse(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 2) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 2 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None)])) + l1.reverse() + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 2 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None)])) + + def test_repeatlist_setslice(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 3) + l2 = make_repeat_list(self.space, self.space.wrap(None), 2) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 3 + assert l2.strategy == self.space.fromcache(RepeatListStrategy) + assert l2.length() == 2 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + assert self.space.eq_w(l2, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None)])) + l1.setslice(1, 1, 1, l2) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 4 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + + l1 = make_repeat_list(self.space, self.space.wrap(42), 2) + l2 = make_range_list(self.space, 1, 3, 3) + l1.setslice(1, 1, 1, l2) + assert l1.strategy == self.space.fromcache(ObjectListStrategy) + assert l1.length() == 4 + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(42), self.space.wrap(1), self.space.wrap(4), self.space.wrap(7)])) + + def test_repeatlist_mul(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 2) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 2 + l2 = l1.mul(3) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l2.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 2 + assert l2.length() == 6 + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + assert self.space.eq_w(l2.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + assert self.space.eq_w(l2, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None), + self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + + def test_repeatlist_mul_other(self): + l1 = W_ListObject(self.space, [self.space.wrap(None)]) + assert l1.strategy == self.space.fromcache(ObjectListStrategy) + assert l1.length() == 1 + l2 = l1.mul(3) + assert l1.strategy == self.space.fromcache(ObjectListStrategy) + assert l2.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 1 + assert l2.length() == 3 + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + assert self.space.eq_w(l2.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + assert self.space.eq_w(l2, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + + def test_repeatlist_imul(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 2) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 2 + l1.inplace_mul(3) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 6 + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None), + self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + + def test_repeatlist_append(self): + # Same item + l1 = make_repeat_list(self.space, self.space.wrap(None), 2) + l1.append(self.space.wrap(None)) + assert self.space.eq_w(l1, W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(None), self.space.wrap(None)])) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + # Same type => IntegerListStrategy + l2 = make_repeat_list(self.space, self.space.wrap(42), 2) + assert l2.strategy == self.space.fromcache(RepeatListStrategy) + l2.append(self.space.wrap(21)) + assert l2.strategy == self.space.fromcache(IntegerListStrategy) + assert self.space.eq_w(l2, W_ListObject(self.space, [self.space.wrap(42), self.space.wrap(42), self.space.wrap(21)])) + # Different type => ObjectListStrategy + l3 = make_repeat_list(self.space, self.space.wrap(42), 2) + l3.append(self.space.wrap(21.0)) + assert self.space.eq_w(l3, W_ListObject(self.space, [self.space.wrap(42), self.space.wrap(42), self.space.wrap(21.0)])) + assert l3.strategy == self.space.fromcache(ObjectListStrategy) + # Stays at ObjectListStrategy + l = make_repeat_list(self.space, self.space.wrap(None), 2) + assert isinstance(l.strategy, RepeatListStrategy) + l.append(self.space.wrap(None)) + assert isinstance(l.strategy, RepeatListStrategy) + l.append(self.space.wrap("string")) + assert isinstance(l.strategy, ObjectListStrategy) + l.append(self.space.wrap(None)) + assert isinstance(l.strategy, ObjectListStrategy) + # Other types + l = make_repeat_list(self.space, self.space.wrap(b'qwerty'), 2) + assert isinstance(l.strategy, RepeatListStrategy) + l.append(self.space.wrap(b'azerty')) + assert isinstance(l.strategy, BytesListStrategy) + l = make_repeat_list(self.space, self.space.wrap(u'azerty'), 2) + assert isinstance(l.strategy, RepeatListStrategy) + l.append(self.space.wrap(u'qwerty')) + assert isinstance(l.strategy, UnicodeListStrategy) + l = make_repeat_list(self.space, self.space.wrap(42.0), 2) + assert isinstance(l.strategy, RepeatListStrategy) + l.append(self.space.wrap(21.0)) + assert isinstance(l.strategy, FloatListStrategy) + + def test_repeatlist_find(self): + # Same item + l1 = make_repeat_list(self.space, self.space.wrap(None), 2) + idx = l1.find(self.space.wrap(None)) + assert idx == 0 + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + # Same integer + l2 = make_repeat_list(self.space, self.space.wrap(42), 2) + idx = l2.find(self.space.wrap(42)) + assert idx == 0 + # Should still find it + idx = l2.find(self.space.wrap(42.0)) + assert idx == 0 + # Different integer + try: + l2.find(self.space.wrap(21)) + except ValueError: + pass + else: + assert False, "Did not raise ValueError" + # Different type + try: + l2.find(self.space.wrap(21.0)) + except ValueError: + pass + else: + assert False, "Did not raise ValueError" + + def test_repeatlist_base(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 0) + assert l1.strategy == self.space.fromcache(EmptyListStrategy) + l1 = make_repeat_list(self.space, self.space.wrap(None), 1000000000) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 1000000000 + + def test_repeatlist_big(self): + l1 = make_repeat_list(self.space, self.space.wrap(42), 1) + w_forty_two = self.space.wrap(42) + + i = 100000 - 1 + while i > 0: + l1.append(w_forty_two) + i -= 1 + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 100000 + + i = 100000 - 1 + while i > 0: + item = l1.pop(0) + assert self.space.eq_w(item, w_forty_two) + i -= 1 + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + assert l1.length() == 1 + + def test_repeatlist_getitem(self): + l1 = make_repeat_list(self.space, self.space.wrap(None), 2) + assert self.space.eq_w(l1.descr_getitem(self.space, self.space.wrap(0)), self.space.wrap(None)) + assert l1.strategy == self.space.fromcache(RepeatListStrategy) + + def test_repeatlist_extend_with_empty(self): + l = make_repeat_list(self.space, self.space.wrap(None), 2) + assert isinstance(l.strategy, RepeatListStrategy) + l.extend(W_ListObject(self.space, [])) + assert isinstance(l.strategy, RepeatListStrategy) + + def test_repeatlist_pop(self): + l = make_repeat_list(self.space, self.space.wrap(None), 42) + assert isinstance(l.strategy, RepeatListStrategy) + v = l.pop(5) + assert self.space.eq_w(v, self.space.wrap(None)) + assert isinstance(l.strategy, RepeatListStrategy) + + l = make_repeat_list(self.space, self.space.wrap(42), 5) + assert isinstance(l.strategy, RepeatListStrategy) + assert l.length() == 5 + v = l.pop(0) + assert l.length() == 4 + assert self.space.eq_w(v, self.space.wrap(42)) + assert isinstance(l.strategy, RepeatListStrategy) + v = l.pop(l.length() - 1) + assert l.length() == 3 + assert self.space.eq_w(v, self.space.wrap(42)) + assert isinstance(l.strategy, RepeatListStrategy) + v = l.pop_end() + assert l.length() == 2 + assert self.space.eq_w(v, self.space.wrap(42)) + assert isinstance(l.strategy, RepeatListStrategy) + l.pop(0) + l.pop(0) + assert l.length() == 0 + assert isinstance(l.strategy, RepeatListStrategy) + try: + l.pop(0) + except IndexError: + pass + else: + assert False, "Did not raise IndexError" + assert l.length() == 0 + assert isinstance(l.strategy, RepeatListStrategy) + try: + l.pop_end() + except IndexError: + pass + else: + assert False, "Did not raise IndexError" + assert l.length() == 0 + assert isinstance(l.strategy, RepeatListStrategy) + + def test_repeatlist_getslice(self): + l = make_repeat_list(self.space, self.space.wrap(0), 42) + assert isinstance(l.strategy, RepeatListStrategy) + s = l.getslice(3, 6, 1, 2) + assert isinstance(l.strategy, RepeatListStrategy) + assert isinstance(s.strategy, RepeatListStrategy) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0), self.space.wrap(0)])) + s = l.getslice(3, 6, 2, 2) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0)])) + s = l.getslice(3, 7, 2, 2) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0)])) + s = l.getslice(3, 8, 2, 2) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0), self.space.wrap(0)])) + s = l.getslice(3, 8, 3, 2) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0)])) + s = l.getslice(3, 9, 3, 2) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0)])) + s = l.getslice(3, 10, 3, 2) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0), self.space.wrap(0)])) + s = l.getslice(3, 6, -1, 2) + assert isinstance(l.strategy, RepeatListStrategy) + assert isinstance(s.strategy, EmptyListStrategy) + s = l.getslice(-3, -6, -1, 2) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0), self.space.wrap(0)])) + assert isinstance(l.strategy, RepeatListStrategy) + assert isinstance(s.strategy, RepeatListStrategy) + s = l.getslice(6, 3, -1, 2) + assert self.space.eq_w(s, W_ListObject(self.space, [self.space.wrap(0), self.space.wrap(0), self.space.wrap(0)])) + assert isinstance(l.strategy, RepeatListStrategy) + assert isinstance(s.strategy, RepeatListStrategy) + + def test_repeatlist_recursion(self): + rl = make_repeat_list(self.space, self.space.wrap(42), 2) + ol = W_ListObject(self.space, [self.space.wrap(None), self.space.wrap(False)]) + # This should not endlessly recurse into a AULS.append() -> OLS.is_correct_type() loop + ol.append(rl) + assert ol.length() == 3 + assert self.space.eq_w(ol, W_ListObject(self.space, [self.space.wrap(None), + self.space.wrap(False), W_ListObject(self.space, [self.space.wrap(42), self.space.wrap(42)])])) + def test_unicode(self): l1 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap("zwei")]) assert isinstance(l1.strategy, BytesListStrategy) From pypy.commits at gmail.com Mon Jan 11 16:21:07 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Mon, 11 Jan 2016 13:21:07 -0800 (PST) Subject: [pypy-commit] pypy repeatlist_strategy: Create a new branch to test the repeatlist strategy Message-ID: <56941cc3.11181c0a.17ceb.ffffe146@mx.google.com> Author: Vincent Legoll Branch: repeatlist_strategy Changeset: r81676:6dff0c69e127 Date: 2016-01-11 21:51 +0100 http://bitbucket.org/pypy/pypy/changeset/6dff0c69e127/ Log: Create a new branch to test the repeatlist strategy From pypy.commits at gmail.com Mon Jan 11 17:13:18 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 11 Jan 2016 14:13:18 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Fix this test by using the new entrypoint API. Message-ID: <569428fe.46bb1c0a.6fe6.ffffec85@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81678:03e0a99cf23b Date: 2016-01-11 22:58 +0100 http://bitbucket.org/pypy/pypy/changeset/03e0a99cf23b/ Log: Fix this test by using the new entrypoint API. diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -534,14 +534,14 @@ assert fc(1) == 0 def test_entrypoints(self): - from rpython.rlib.entrypoint import entrypoint + from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.translator.interactive import Translation def f(args): return 3 key = 'test_entrypoints42' - @entrypoint(key, [int], 'foobar') + @entrypoint_highlevel(key, [int], 'foobar') def g(x): return x + 42 From pypy.commits at gmail.com Mon Jan 11 17:13:20 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 11 Jan 2016 14:13:20 -0800 (PST) Subject: [pypy-commit] pypy default: Make GraphAnalyzer return the bottom result when encountering a null function pointer. Message-ID: <56942900.c4b61c0a.55951.fffff4b6@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81679:58ef780a3875 Date: 2016-01-11 23:11 +0100 http://bitbucket.org/pypy/pypy/changeset/58ef780a3875/ Log: Make GraphAnalyzer return the bottom result when encountering a null function pointer. diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -80,12 +80,17 @@ funcobj = op.args[0].value._obj except DelayedPointer: return self.top_result() + if funcobj is None: + # We encountered a null pointer. Calling it will crash. + # However, the call could be on a dead path, so we return the + # bottom result here. + return self.bottom_result() if getattr(funcobj, 'external', None) is not None: x = self.analyze_external_call(funcobj, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x - graph = get_graph(op.args[0], self.translator) + graph = funcobj.graph assert graph is not None x = self.analyze_direct_call(graph, seen) if self.verbose and x: diff --git a/rpython/translator/backendopt/test/test_graphanalyze.py b/rpython/translator/backendopt/test/test_graphanalyze.py --- a/rpython/translator/backendopt/test/test_graphanalyze.py +++ b/rpython/translator/backendopt/test/test_graphanalyze.py @@ -65,3 +65,14 @@ op = SpaceOperation('direct_call', [c_f], None) analyzer = BoolGraphAnalyzer(t) assert analyzer.analyze(op) + + +def test_null_fnptr(): + from rpython.flowspace.model import SpaceOperation, Constant + from rpython.rtyper.lltypesystem.lltype import Void, FuncType, nullptr + from rpython.translator.translator import TranslationContext + t = TranslationContext() + fnptr = nullptr(FuncType([], Void)) + op = SpaceOperation('direct_call', [Constant(fnptr)], None) + analyzer = BoolGraphAnalyzer(t) + assert not analyzer.analyze(op) From pypy.commits at gmail.com Mon Jan 11 18:17:34 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 11 Jan 2016 15:17:34 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5694380e.41dfc20a.752aa.38a4@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r692:2af2311f676c Date: 2016-01-12 00:17 +0100 http://bitbucket.org/pypy/pypy.org/changeset/2af2311f676c/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $62669 of $105000 (59.7%) + $62717 of $105000 (59.7%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Mon Jan 11 23:14:56 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 11 Jan 2016 20:14:56 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <56947dc0.cb571c0a.75fb7.3007@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81680:2aff8b331a70 Date: 2016-01-12 02:44 +0100 http://bitbucket.org/pypy/pypy/changeset/2aff8b331a70/ Log: hg merge default diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -80,12 +80,17 @@ funcobj = op.args[0].value._obj except DelayedPointer: return self.top_result() + if funcobj is None: + # We encountered a null pointer. Calling it will crash. + # However, the call could be on a dead path, so we return the + # bottom result here. + return self.bottom_result() if getattr(funcobj, 'external', None) is not None: x = self.analyze_external_call(funcobj, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x - graph = get_graph(op.args[0], self.translator) + graph = funcobj.graph assert graph is not None x = self.analyze_direct_call(graph, seen) if self.verbose and x: diff --git a/rpython/translator/backendopt/test/test_graphanalyze.py b/rpython/translator/backendopt/test/test_graphanalyze.py --- a/rpython/translator/backendopt/test/test_graphanalyze.py +++ b/rpython/translator/backendopt/test/test_graphanalyze.py @@ -65,3 +65,14 @@ op = SpaceOperation('direct_call', [c_f], None) analyzer = BoolGraphAnalyzer(t) assert analyzer.analyze(op) + + +def test_null_fnptr(): + from rpython.flowspace.model import SpaceOperation, Constant + from rpython.rtyper.lltypesystem.lltype import Void, FuncType, nullptr + from rpython.translator.translator import TranslationContext + t = TranslationContext() + fnptr = nullptr(FuncType([], Void)) + op = SpaceOperation('direct_call', [Constant(fnptr)], None) + analyzer = BoolGraphAnalyzer(t) + assert not analyzer.analyze(op) From pypy.commits at gmail.com Tue Jan 12 03:28:42 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 00:28:42 -0800 (PST) Subject: [pypy-commit] pypy default: Remove the need for passing "config" to StackletThread(). Message-ID: <5694b93a.50371c0a.aef22.7ae1@mx.google.com> Author: Armin Rigo Branch: Changeset: r81681:beb301687f9b Date: 2016-01-12 09:27 +0100 http://bitbucket.org/pypy/pypy/changeset/beb301687f9b/ Log: Remove the need for passing "config" to StackletThread(). diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py --- a/rpython/rlib/rstacklet.py +++ b/rpython/rlib/rstacklet.py @@ -1,7 +1,7 @@ import sys from rpython.rlib import _rffi_stacklet as _c from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import fetch_translated_config from rpython.rtyper.lltypesystem import lltype, llmemory DEBUG = False @@ -10,8 +10,8 @@ class StackletThread(object): @jit.dont_look_inside - def __init__(self, config): - self._gcrootfinder = _getgcrootfinder(config, we_are_translated()) + def __init__(self, _argument_ignored_for_backward_compatibility=None): + self._gcrootfinder = _getgcrootfinder(fetch_translated_config()) self._thrd = _c.newthread() if not self._thrd: raise MemoryError @@ -67,11 +67,8 @@ # ____________________________________________________________ -def _getgcrootfinder(config, translated): - if translated: - assert config is not None, ("you have to pass a valid config, " - "e.g. from 'driver.config'") - elif '__pypy__' in sys.builtin_module_names: +def _getgcrootfinder(config): + if config is None and '__pypy__' in sys.builtin_module_names: import py py.test.skip("cannot run the stacklet tests on top of pypy: " "calling directly the C function stacklet_switch() " diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py --- a/rpython/rlib/test/test_rstacklet.py +++ b/rpython/rlib/test/test_rstacklet.py @@ -17,10 +17,9 @@ class Runner: STATUSMAX = 5000 - config = None def init(self, seed): - self.sthread = rstacklet.StackletThread(self.config) + self.sthread = rstacklet.StackletThread() self.random = rrandom.Random(seed) def done(self): @@ -301,12 +300,11 @@ config.translation.gcrootfinder = cls.gcrootfinder GCROOTFINDER = cls.gcrootfinder cls.config = config - cls.old_values = Runner.config, Runner.STATUSMAX - Runner.config = config + cls.old_status_max = Runner.STATUSMAX Runner.STATUSMAX = 25000 def teardown_class(cls): - Runner.config, Runner.STATUSMAX = cls.old_values + Runner.STATUSMAX = cls.old_status_max def test_demo1(self): t, cbuilder = self.compile(entry_point) From pypy.commits at gmail.com Tue Jan 12 03:43:55 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 12 Jan 2016 00:43:55 -0800 (PST) Subject: [pypy-commit] pypy repeatlist_strategy: Fix test, was: Message-ID: <5694bccb.44e21c0a.ac823.ffff8cb6@mx.google.com> Author: Vincent Legoll Branch: repeatlist_strategy Changeset: r81682:e2d983c38082 Date: 2016-01-12 08:51 +0100 http://bitbucket.org/pypy/pypy/changeset/e2d983c38082/ Log: Fix test, was: NameError: free variable 'd' referenced before assignment in enclosing scope diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -76,12 +76,12 @@ def test_remove_mutating(self): collections = self.collections + d = collections.deque([MutatingCmp()]) class MutatingCmp(object): def __eq__(self, other): d.clear() return True - d = collections.deque([MutatingCmp()]) raises(IndexError, d.remove, 1) def test_remove_failing(self): From pypy.commits at gmail.com Tue Jan 12 06:45:45 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Jan 2016 03:45:45 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: translation issues mostly and added functions and missing implementations. zrpy_releasegil translates Message-ID: <5694e769.ccaa1c0a.51e03.351f@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81683:4d6c20801028 Date: 2016-01-12 12:26 +0100 http://bitbucket.org/pypy/pypy/changeset/4d6c20801028/ Log: translation issues mostly and added functions and missing implementations. zrpy_releasegil translates diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -22,8 +22,7 @@ from rpython.jit.metainterp.resoperation import rop from rpython.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) -from rpython.jit.metainterp.history import (INT, REF, FLOAT, - TargetToken) +from rpython.jit.metainterp.history import (INT, REF, FLOAT, TargetToken) from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id from rpython.rlib import rgc @@ -515,7 +514,7 @@ # registers). mc = InstrBuilder() # - mc._push_core_regs_to_jitframe([r.r14]) # store the link on the jit frame + self._push_core_regs_to_jitframe(mc, [r.r14]) # store the link on the jit frame # Do the call mc.push_std_frame() mc.LGR(r.r2, r.SP) @@ -530,7 +529,7 @@ # else we have an exception mc.cmp_op(r.SCRATCH, l.imm(0), imm=True) # - mc._pop_core_regs_from_jitframe([r.r14]) # restore the link on the jit frame + self._pop_core_regs_from_jitframe(mc, [r.r14]) # restore the link on the jit frame # So we return to our caller, conditionally if "EQ" mc.BCR(c.EQ, r.r14) # @@ -556,13 +555,14 @@ assert check_imm_value(diff) mc = self.mc - mc.load_imm(r.SCRATCH2, endaddr) # li r0, endaddr - mc.branch_absolute(self.stack_check_slowpath) - mc.load(r.SCRATCH, r.SCRATCH2, 0) # lg r1, [end] - mc.load(r.SCRATCH2, r.SCRATCH2, diff)# lg r0, [length] - mc.SGR(r.SCRATCH, r.SP) # sub r1, SP - mc.cmp_op(r.SCRATCH, r.SCRATCH2, signed=False) - mc.bgtctrl() + mc.load_imm(r.SCRATCH, endaddr) # li r0, endaddr + mc.load(r.SCRATCH2, r.SCRATCH, 0) # lg r1, [end] + mc.load(r.SCRATCH, r.SCRATCH, diff)# lg r0, [length] + mc.SGR(r.SCRATCH2, r.SP) # sub r1, SP + mc.load_imm(r.r14, self.stack_check_slowpath) + off = l.imm(mc.CLGRJ_byte_count + mc.BASR_byte_count) + mc.CLGRJ(r.SCRATCH2, r.SCRATCH, c.GT, off) + mc.BASR(r.r14, r.r14) def _check_frame_depth(self, mc, gcmap): """ check if the frame is of enough depth to follow this bridge. @@ -731,6 +731,7 @@ def fixup_target_tokens(self, rawstart): for targettoken in self.target_tokens_currently_compiling: + assert isinstance(targettoken, TargetToken) targettoken._ll_loop_code += rawstart self.target_tokens_currently_compiling = None @@ -813,6 +814,21 @@ ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') mc.STG(r.SCRATCH, l.addr(ofs, r.SPP)) + def break_long_loop(self): + # If the loop is too long, the guards in it will jump forward + # more than 32 KB. We use an approximate hack to know if we + # should break the loop here with an unconditional "b" that + # jumps over the target code. + jmp_pos = self.mc.currpos() + self.mc.reserve_cond_jump() + + self.write_pending_failure_recoveries() + + currpos = self.mc.currpos() + pmc = OverwritingBuilder(self.mc, jmp_pos, 1) + pmc.BRCL(c.ANY, l.imm(currpos - jmp_pos)) + pmc.overwrite() + def _assemble(self, regalloc, inputargs, operations): self._regalloc = regalloc self.guard_success_cc = c.cond_none @@ -883,7 +899,7 @@ elif prev_loc.is_in_pool(): # move immediate value to fp register if loc.is_fp_reg(): - self.LD(loc, prev_loc) + self.mc.LD(loc, prev_loc) return # move immediate value to memory elif loc.is_stack(): @@ -1263,7 +1279,7 @@ if check_imm_value(size): mc.AGHI(r.RSZ, l.imm(size)) else: - mc.load_imm(r.SCRATCH2, l.imm(size)) + mc.load_imm(r.SCRATCH2, size) mc.AGR(r.RSZ, r.SCRATCH2) mc.load(r.SCRATCH2, r.r1, diff) # load nursery_top diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -126,8 +126,8 @@ if gcrootmap.is_shadow_stack and self.is_call_release_gil: # in this mode, RSHADOWOLD happens to contain the shadowstack # top at this point, so reuse it instead of loading it again - xxx - ssreg = self.RSHADOWOLD + # RSHADOWOLD is moved to the scratch reg just before restoring r8 + ssreg = r.SCRATCH self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) def emit_raw_call(self): @@ -200,7 +200,8 @@ self.mc.trap() # boehm: patched with a BEQ: jump if r12 is zero self.mc.write('\x00'*4) # shadowstack: patched with BNE instead - if self.asm.cpu.gc_ll_descr.gcrootmap: + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap: # When doing a call_release_gil with shadowstack, there # is the risk that the 'rpy_fastgil' was free but the # current shadowstack can be the one of a different @@ -219,7 +220,7 @@ self.mc.STG(r.r12, l.addr(0,RFASTGILPTR)) pmc = OverwritingBuilder(self.mc, bne_location, 1) - pmc.BCRL(c.NE, self.mc.currpos() - bne_location) + pmc.BRCL(c.NE, l.imm(self.mc.currpos() - bne_location)) pmc.overwrite() # # Yes, we need to call the reacqgil() function. @@ -246,6 +247,9 @@ pmc.overwrite() # restore the values that might have been overwritten + if gcrootmap: + if gcrootmap.is_shadow_stack and self.is_call_release_gil: + self.mc.LGR(r.SCRATCH, RSHADOWOLD) self.mc.LMG(r.r8, r.r13, l.addr(-7*WORD, r.SP)) diff --git a/rpython/jit/backend/zarch/conditions.py b/rpython/jit/backend/zarch/conditions.py --- a/rpython/jit/backend/zarch/conditions.py +++ b/rpython/jit/backend/zarch/conditions.py @@ -2,6 +2,7 @@ from rpython.rlib.objectmodel import specialize class ConditionLocation(loc.ImmLocation): + _immutable_ = True def __repr__(self): s = "" if self.value & 0x10 != 0: diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -464,27 +464,48 @@ e = unpack_arg(a, at) f = unpack_arg(b, bt) return func(self, e, f) + def function2_last_default(self, a): + e = unpack_arg(a, at) + return func(self, e, 0) def function3(self, a, b, c): e = unpack_arg(a, at) f = unpack_arg(b, bt) g = unpack_arg(c, ct) return func(self, e, f, g) + def function3_last_default(self, a, b): + e = unpack_arg(a, at) + f = unpack_arg(b, bt) + return func(self, e, f, 0) def function4(self, a, b, c, d): e = unpack_arg(a, at) f = unpack_arg(b, bt) g = unpack_arg(c, ct) h = unpack_arg(d, dt) return func(self, e, f, g, h) + def function4_last_default(self, a, b, c): + e = unpack_arg(a, at) + f = unpack_arg(b, bt) + g = unpack_arg(c, ct) + return func(self, e, f, g, 0) if len(argtypes) == 0: function = function0 elif len(argtypes) == 1: function = function1 elif len(argtypes) == 2: function = function2 + if argtypes[1] == '-': + # e.g. SPM/IPM + function = function2_last_default elif len(argtypes) == 3: function = function3 + if argtypes[2] == '-': + # e.g. FIEBR or CGEBR ignore the last element + function = function3_last_default elif len(argtypes) == 4: function = function4 + if argtypes[3] == '-': + # e.g. FIEBR or CGEBR ignore the last element + function = function4_last_default else: assert 0, "implement function for argtypes %s" % (argtypes,) function.__name__ = mnemonic diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -251,6 +251,25 @@ emit_call_f = _genop_call emit_call_n = _genop_call + def _emit_threadlocalref_get(self, op, arglocs, regalloc): + [resloc] = arglocs + offset = op.getarg(1).getint() # getarg(0) == 'threadlocalref_get' + calldescr = op.getdescr() + size = calldescr.get_result_size() + sign = calldescr.is_result_signed() + # + # This loads the stack location THREADLOCAL_OFS into a + # register, and then read the word at the given offset. + # It is only supported if 'translate_support_code' is + # true; otherwise, the execute_token() was done with a + # dummy value for the stack location THREADLOCAL_OFS + # + assert self.cpu.translate_support_code + assert resloc.is_reg() + assert check_imm_value(offset) + self.mc.LG(resloc, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) + self._memory_read(resloc, l.addr(offset, resloc), size, sign) + def _emit_math_sqrt(self, op, arglocs, regalloc): l0, res = arglocs self.mc.SQDBR(res, l0) @@ -641,10 +660,9 @@ self._read_typeid(r.SCRATCH2, loc_ptr) assert 0 <= expected_typeid <= 0x7fffffff # 4 bytes are always enough if expected_typeid > 0xffff: # if 2 bytes are not enough - self.mc.subis(r.SCRATCH2.value, r.SCRATCH2.value, - expected_typeid >> 16) + self.mc.AGHI(r.SCRATCH2, l.imm(-(expected_typeid >> 16))) expected_typeid = expected_typeid & 0xffff - self.mc.cmp_op(0, r.SCRATCH2.value, expected_typeid, + self.mc.cmp_op(r.SCRATCH2, l.imm(expected_typeid), imm=True, signed=False) def emit_guard_gc_type(self, op, arglocs, regalloc): @@ -1026,8 +1044,8 @@ def _call_assembler_check_descr(self, value, tmploc): ofs = self.cpu.get_ofs_of_frame_field('jf_descr') self.mc.LG(r.SCRATCH, l.addr(ofs, r.r2)) - if check_imm(value): - self.mc.cmp_op(r.SCRATCH, value, imm=True) + if check_imm_value(value): + self.mc.cmp_op(r.SCRATCH, l.imm(value), imm=True) else: self.mc.load_imm(r.SCRATCH2, value) self.mc.cmp_op(r.SCRATCH, r.SCRATCH2, imm=False) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -4,6 +4,7 @@ TargetToken) from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.history import Const from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.jit.backend.zarch.arch import (WORD, RECOVERY_GCMAP_POOL_OFFSET, RECOVERY_TARGET_POOL_OFFSET) @@ -168,16 +169,14 @@ print('pool: %s at offset: %d' % (val, offset)) if val.is_constant(): if val.type == FLOAT: - self.overwrite_64(mc, offset, float2longlong(val.value)) + self.overwrite_64(mc, offset, float2longlong(val.getfloat())) elif val.type == INT: - i64 = rffi.cast(lltype.Signed, val.value) + i64 = rffi.cast(lltype.Signed, val.getint()) self.overwrite_64(mc, offset, i64) else: assert val.type == REF - i64 = rffi.cast(lltype.Signed, val.value) + i64 = rffi.cast(lltype.Signed, val.getref_base()) self.overwrite_64(mc, offset, i64) - else: - pass for guard_token in pending_guard_tokens: descr = guard_token.faildescr diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -163,8 +163,6 @@ self._check_type(var) prev_loc = self.loc(var, must_exist=must_exist) var2 = TempVar() - if prev_loc is self.frame_reg: - return prev_loc if bind_first: loc, loc2 = self.force_allocate_reg_pair(bindvar, var2, self.temp_boxes) else: @@ -437,7 +435,7 @@ return r.SPP else: # else, return a regular register (not SPP). - if self.rm.reg_bindings.get(var, None) != None: + if self.rm.reg_bindings.get(var, None) is not None: return self.rm.loc(var, must_exist=True) return self.rm.force_allocate_reg(var) diff --git a/rpython/jit/backend/zarch/test/test_zrpy_releasegil.py b/rpython/jit/backend/zarch/test/test_zrpy_releasegil.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_zrpy_releasegil.py @@ -0,0 +1,5 @@ +from rpython.jit.backend.llsupport.test.zrpy_releasegil_test import ReleaseGILTests + + +class TestShadowStack(ReleaseGILTests): + gcrootfinder = "shadowstack" From pypy.commits at gmail.com Tue Jan 12 08:51:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Jan 2016 05:51:56 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed print statement, ztranslation_basic is now passing Message-ID: <569504fc.520e1c0a.9f3f7.fffffa2f@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81684:0d6d52e34a38 Date: 2016-01-12 14:51 +0100 http://bitbucket.org/pypy/pypy/changeset/0d6d52e34a38/ Log: removed print statement, ztranslation_basic is now passing diff --git a/rpython/jit/backend/zarch/helper/assembler.py b/rpython/jit/backend/zarch/helper/assembler.py --- a/rpython/jit/backend/zarch/helper/assembler.py +++ b/rpython/jit/backend/zarch/helper/assembler.py @@ -17,7 +17,6 @@ # Support for NaNs: S390X sets condition register to 0x3 (unordered) # as soon as any of the operands is NaN condition = c.prepare_float_condition(condition) - print("condition is:", condition) self.flush_cc(condition, arglocs[2]) diff --git a/rpython/jit/backend/zarch/test/test_ztranslation_basic.py b/rpython/jit/backend/zarch/test/test_ztranslation_basic.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_ztranslation_basic.py @@ -0,0 +1,3 @@ +from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTest +class TestTranslationZARCH(TranslationTest): + pass From pypy.commits at gmail.com Tue Jan 12 10:58:01 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 07:58:01 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Found out how to control the extension of distutils-built libraries. Message-ID: <56952289.cdb81c0a.ad17d.2be0@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2562:d3c439508948 Date: 2016-01-12 16:57 +0100 http://bitbucket.org/cffi/cffi/changeset/d3c439508948/ Log: Found out how to control the extension of distutils-built libraries. Use a more suitable value by default when building embedded libraries. diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -620,13 +620,25 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0): + def compile(self, tmpdir='.', verbose=0, ext=None): + """Values recognized for the ext parameter: + + - 'capi': use distutils' default to build CPython C API extensions + - 'system': use the system's default for dynamic libraries (.so/.dll) + - '.FOO': exactly .FOO + + The default is 'capi' when building a non-embedded C API extension, + and 'system' when building an embedded library. + """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") + if ext not in (None, 'capi', 'system') and '.' not in ext: + raise ValueError("bad value for 'ext' argument: %r" % (ext,)) module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, + target_extention=ext, source_extension=source_extension, compiler_verbose=verbose, **kwds) diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -21,12 +21,13 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, target_extention='capi'): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, + target_extention) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +37,19 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _save_val(name): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + return config_vars.get(name, Ellipsis) + +def _restore_val(name, value): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + config_vars[name] = value + if value is Ellipsis: + del config_vars[name] + +def _build(tmpdir, ext, compiler_verbose=0, target_extention='capi'): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -50,11 +63,25 @@ # try: old_level = distutils.log.set_threshold(0) or 0 + old_SO = _save_val('SO') + old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extention == 'capi': + pass # keep the values already in 'SO' and 'EXT_SUFFIX' + else: + if target_extention == 'system': + if sys.platform == 'win32': + target_extention = '.dll' + else: + target_extention = '.so' + _restore_val('SO', target_extention) + _restore_val('EXT_SUFFIX', target_extention) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') finally: distutils.log.set_threshold(old_level) + _restore_val('SO', old_SO) + _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1359,7 +1359,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, **kwds): + compiler_verbose=1, target_extention=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1378,10 +1378,16 @@ ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: + if target_extention is None: + if ffi._embedding is None: + target_extention = 'capi' + else: + target_extention = 'system' cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose, + target_extention) finally: os.chdir(cwd) return outputfilename From pypy.commits at gmail.com Tue Jan 12 11:11:21 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 08:11:21 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Test and fix Message-ID: <569525a9.ea5ec20a.cd36.ffffdd08@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2563:32a664384ab3 Date: 2016-01-12 17:10 +0100 http://bitbucket.org/cffi/cffi/changeset/32a664384ab3/ Log: Test and fix diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -78,6 +78,8 @@ _restore_val('EXT_SUFFIX', target_extention) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) _restore_val('SO', old_SO) @@ -86,8 +88,6 @@ distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -68,6 +68,10 @@ match = re.compile(r"\bFILENAME: (.+)").search(output) assert match dynamic_lib_name = match.group(1) + if sys.platform == 'win32': + assert dynamic_lib_name.endswith('_cffi.dll') + else: + assert dynamic_lib_name.endswith('_cffi.so') self._compiled_modules[name] = dynamic_lib_name return self._compiled_modules[name] From pypy.commits at gmail.com Tue Jan 12 11:55:18 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 08:55:18 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: More Windows hacks Message-ID: <56952ff6.c5321c0a.c7d6b.41d2@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2564:3a8c3afacaa2 Date: 2016-01-12 17:55 +0100 http://bitbucket.org/cffi/cffi/changeset/3a8c3afacaa2/ Log: More Windows hacks diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -21,13 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0, target_extention='capi'): +def compile(tmpdir, ext, compiler_verbose=0, target_extention=None, + embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: outputfilename = _build(tmpdir, ext, compiler_verbose, - target_extention) + target_extention, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -49,7 +50,19 @@ if value is Ellipsis: del config_vars[name] -def _build(tmpdir, ext, compiler_verbose=0, target_extention='capi'): +def _win32_hack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): + MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ + MSVCCompiler._remove_visual_c_ref + MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file + +def _win32_unhack_for_embedding(): + MSVCCompiler._remove_visual_c_ref = \ + MSVCCompiler._remove_visual_c_ref_CFFI_BAK + +def _build(tmpdir, ext, compiler_verbose=0, target_extention=None, + embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -62,10 +75,17 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: + if sys.platform == 'win32' and embedding: + _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 old_SO = _save_val('SO') old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extention is None: + if embedding: + target_extention = 'system' + else: + target_extention = 'capi' if target_extention == 'capi': pass # keep the values already in 'SO' and 'EXT_SUFFIX' else: @@ -84,6 +104,8 @@ distutils.log.set_threshold(old_level) _restore_val('SO', old_SO) _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) + if sys.platform == 'win32' and embedding: + _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1365,7 +1365,8 @@ if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: - if ffi._embedding is not None: + embedding = (ffi._embedding is not None) + if embedding: ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, @@ -1378,16 +1379,12 @@ ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: - if target_extention is None: - if ffi._embedding is None: - target_extention = 'capi' - else: - target_extention = 'system' cwd = os.getcwd() try: os.chdir(tmpdir) outputfilename = ffiplatform.compile('.', ext, compiler_verbose, - target_extention) + target_extention, + embedding=embedding) finally: os.chdir(cwd) return outputfilename From pypy.commits at gmail.com Tue Jan 12 12:07:36 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 09:07:36 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: two tests pass on windows! progress Message-ID: <569532d8.4a5ec20a.af75e.4e87@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2565:aeaadd68f498 Date: 2016-01-12 18:04 +0200 http://bitbucket.org/cffi/cffi/changeset/aeaadd68f498/ Log: two tests pass on windows! progress diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -58,6 +58,7 @@ MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file def _win32_unhack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler MSVCCompiler._remove_visual_c_ref = \ MSVCCompiler._remove_visual_c_ref_CFFI_BAK diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -4,9 +4,6 @@ from testing.udir import udir import cffi -if sys.platform == 'win32': - py.test.skip("it 'should' work on Windows, but I did not manage at all" - " to make these tests pass. Please help") local_dir = os.path.dirname(os.path.abspath(__file__)) _link_error = '?' @@ -34,6 +31,8 @@ def setup_method(self, meth): check_lib_python_found(str(udir.ensure('embedding', dir=1))) self._path = udir.join('embedding', meth.__name__) + if sys.platform == "win32": + self._compiled_modules.clear() # workaround def get_path(self): return str(self._path.ensure(dir=1)) @@ -90,7 +89,7 @@ libfiles = [] for m in modules: m = os.path.basename(m) - assert m.endswith('.pyd') + assert m.endswith('.dll') libfiles.append('Release\\%s.lib' % m[:-4]) modules = libfiles elif threads: From pypy.commits at gmail.com Tue Jan 12 12:12:58 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 09:12:58 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Skip this test on windows Message-ID: <5695341a.c2351c0a.bef4f.ffffbce0@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2566:510d0c89e6d7 Date: 2016-01-12 18:12 +0100 http://bitbucket.org/cffi/cffi/changeset/510d0c89e6d7/ Log: Skip this test on windows diff --git a/testing/embedding/test_performance.py b/testing/embedding/test_performance.py --- a/testing/embedding/test_performance.py +++ b/testing/embedding/test_performance.py @@ -1,5 +1,10 @@ +import sys from testing.embedding.test_basic import EmbeddingTests +if sys.platform == 'win32': + import py + py.test.skip("written with pthreads") + class TestPerformance(EmbeddingTests): def test_perf_single_threaded(self): From pypy.commits at gmail.com Tue Jan 12 12:19:52 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 09:19:52 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: use declspec(dllimport/dllexport) on windows Message-ID: <569535b8.0c2e1c0a.4ac7f.50aa@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2567:22426c847e7f Date: 2016-01-12 18:19 +0100 http://bitbucket.org/cffi/cffi/changeset/22426c847e7f/ Log: use declspec(dllimport/dllexport) on windows diff --git a/testing/embedding/add_recursive-test.c b/testing/embedding/add_recursive-test.c --- a/testing/embedding/add_recursive-test.c +++ b/testing/embedding/add_recursive-test.c @@ -1,7 +1,13 @@ #include -extern int add_rec(int, int); -extern int (*my_callback)(int); +#ifdef _MSC_VER +# define DLLIMPORT __declspec(dllimport) +#else +# define DLLIMPORT extern +#endif + +DLLIMPORT int add_rec(int, int); +DLLIMPORT int (*my_callback)(int); static int some_callback(int x) { diff --git a/testing/embedding/add_recursive.py b/testing/embedding/add_recursive.py --- a/testing/embedding/add_recursive.py +++ b/testing/embedding/add_recursive.py @@ -24,7 +24,9 @@ """) ffi.set_source("_add_recursive_cffi", """ -int (*my_callback)(int); +/* use CFFI_DLLEXPORT: on windows, it expands to __declspec(dllexport), + which is needed to export a variable from a dll */ +CFFI_DLLEXPORT int (*my_callback)(int); """) fn = ffi.compile(verbose=True) From pypy.commits at gmail.com Tue Jan 12 12:40:43 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 09:40:43 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: (untested) trying to have the multithreaded tests run on windows Message-ID: <56953a9b.cf0b1c0a.91856.54f9@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2568:083815c0ba89 Date: 2016-01-12 18:40 +0100 http://bitbucket.org/cffi/cffi/changeset/083815c0ba89/ Log: (untested) trying to have the multithreaded tests run on windows diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -78,6 +78,7 @@ path = self.get_path() filename = '%s.c' % name shutil.copy(os.path.join(local_dir, filename), path) + shutil.copy(os.path.join(local_dir, 'thread-test.h'), path) import distutils.ccompiler curdir = os.getcwd() try: diff --git a/testing/embedding/test_performance.py b/testing/embedding/test_performance.py --- a/testing/embedding/test_performance.py +++ b/testing/embedding/test_performance.py @@ -3,7 +3,7 @@ if sys.platform == 'win32': import py - py.test.skip("written with pthreads") + py.test.skip("written with POSIX functions") class TestPerformance(EmbeddingTests): diff --git a/testing/embedding/thread-test.h b/testing/embedding/thread-test.h new file mode 100644 --- /dev/null +++ b/testing/embedding/thread-test.h @@ -0,0 +1,62 @@ +/************************************************************/ +#ifndef _MSC_VER +/************************************************************/ + + +#include +#include + + +/************************************************************/ +#else +/************************************************************/ + + +/* Very quick and dirty, just what I need for these tests. + Don't use directly in any real code! +*/ + +#include +#include + +typedef HANDLE sem_t; +typedef HANDLE pthread_t; + +int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + assert(value == 0); + *sem = CreateSemaphore(NULL, 0, 999, NULL); + return *sem ? 0 : -1; +} + +int sem_post(sem_t *sem) +{ + return ReleaseSemaphore(*res, 1, NULL) ? 0 : -1; +} + +int sem_wait(sem_t *sem) +{ + WaitForSingleObject(*res, INFINITE); + return 0; +} + +DWORD WINAPI myThreadProc(LPVOID lpParameter) +{ + void *(* start_routine)(void *) = (void *(*)(void *))lpParameter; + start_routine(NULL); + return 0; +} + +int pthread_create(pthread_t *thread, void *attr, + void *start_routine(void *), void *arg) +{ + assert(arg == NULL); + *thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL); + return *thread ? 0 : -1; +} + + +/************************************************************/ +#endif +/************************************************************/ diff --git a/testing/embedding/thread1-test.c b/testing/embedding/thread1-test.c --- a/testing/embedding/thread1-test.c +++ b/testing/embedding/thread1-test.c @@ -1,7 +1,6 @@ #include -#include -#include #include +#include "thread-test.h" #define NTHREADS 10 diff --git a/testing/embedding/thread2-test.c b/testing/embedding/thread2-test.c --- a/testing/embedding/thread2-test.c +++ b/testing/embedding/thread2-test.c @@ -1,7 +1,6 @@ #include -#include -#include #include +#include "thread-test.h" extern int add1(int, int); extern int add2(int, int, int); diff --git a/testing/embedding/thread3-test.c b/testing/embedding/thread3-test.c --- a/testing/embedding/thread3-test.c +++ b/testing/embedding/thread3-test.c @@ -1,7 +1,6 @@ #include -#include -#include #include +#include "thread-test.h" extern int add2(int, int, int); extern int add3(int, int, int, int); diff --git a/testing/embedding/tlocal-test.c b/testing/embedding/tlocal-test.c --- a/testing/embedding/tlocal-test.c +++ b/testing/embedding/tlocal-test.c @@ -1,7 +1,6 @@ #include -#include -#include #include +#include "thread-test.h" #define NTHREADS 10 From pypy.commits at gmail.com Tue Jan 12 12:42:09 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 09:42:09 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: fix Message-ID: <56953af1.6953c20a.10e25.ffff8f12@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2569:c91000e0af90 Date: 2016-01-12 18:41 +0100 http://bitbucket.org/cffi/cffi/changeset/c91000e0af90/ Log: fix diff --git a/testing/embedding/thread-test.h b/testing/embedding/thread-test.h --- a/testing/embedding/thread-test.h +++ b/testing/embedding/thread-test.h @@ -32,12 +32,12 @@ int sem_post(sem_t *sem) { - return ReleaseSemaphore(*res, 1, NULL) ? 0 : -1; + return ReleaseSemaphore(*sem, 1, NULL) ? 0 : -1; } int sem_wait(sem_t *sem) { - WaitForSingleObject(*res, INFINITE); + WaitForSingleObject(*sem, INFINITE); return 0; } From pypy.commits at gmail.com Tue Jan 12 12:44:22 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 12 Jan 2016 09:44:22 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Simplify get_external_function_sandbox_graph() a bit Message-ID: <56953b76.84c9c20a.2d630.ffffe680@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81685:dd074136bef8 Date: 2016-01-11 20:15 +0000 http://bitbucket.org/pypy/pypy/changeset/dd074136bef8/ Log: Simplify get_external_function_sandbox_graph() a bit diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -15,7 +15,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.tool.ansi_print import ansi_log @@ -100,10 +99,17 @@ STDERR = 2 with rffi.scoped_str2charp(msg + '\n') as buf: writeall_not_sandboxed(STDERR, buf, len(msg) + 1) - raise RuntimeError(msg) # XXX in RPython, the msg is ignored at the moment + raise RuntimeError(msg) # XXX in RPython, the msg is ignored + +def make_stub(fnname, msg): + log.WARNING(msg) + def execute(*args): + not_implemented_stub(msg) + execute.__name__ = 'sandboxed_%s' % (fnname,) + return execute dump_string = rmarshal.get_marshaller(str) -load_int = rmarshal.get_loader(int) +load_int = rmarshal.get_loader(int) def get_external_function_sandbox_graph(fnobj, db, force_stub=False): """Build the graph of a helper trampoline function to be used @@ -117,11 +123,9 @@ else: fnname = fnobj._name if hasattr(fnobj, 'graph'): - # get the annotation of the input arguments and the result graph = fnobj.graph - annotator = db.translator.annotator - args_s = [annotator.binding(v) for v in graph.getargs()] - s_result = annotator.binding(graph.getreturnvar()) + args_s = [v.annotation for v in graph.getargs()] + s_result = graph.getreturnvar().annotation else: # pure external function - fall back to the annotations # corresponding to the ll types @@ -129,37 +133,33 @@ args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] s_result = lltype_to_annotation(FUNCTYPE.RESULT) - try: - if force_stub: # old case - don't try to support suggested_primitive - raise NotImplementedError("sandboxing for external function '%s'" - % (fnname,)) + if force_stub: # old case - don't try to support suggested_primitive + msg = "Not implemented: sandboxing for external function '%s'" % (fnname,) + execute = make_stub(fnname, msg) + else: + try: + dump_arguments = rmarshal.get_marshaller(tuple(args_s)) + load_result = rmarshal.get_loader(s_result) + except (rmarshal.CannotMarshal, rmarshal.CannotUnmarshall) as e: + msg = "Cannot sandbox function '%s': %s" % (fnname, e) + execute = make_stub(fnname, msg) + else: + def execute(*args): + # marshal the function name and input arguments + buf = [] + dump_string(buf, fnname) + dump_arguments(buf, args) + # send the buffer and wait for the answer + loader = sandboxed_io(buf) + # decode the answer + result = load_result(loader) + loader.check_finished() + return result + execute.__name__ = 'sandboxed_%s' % (fnname,) + return _annotate(db.translator.rtyper, execute, args_s, s_result) - dump_arguments = rmarshal.get_marshaller(tuple(args_s)) - load_result = rmarshal.get_loader(s_result) - - except (NotImplementedError, - rmarshal.CannotMarshal, - rmarshal.CannotUnmarshall), e: - msg = 'Not Implemented: %s' % (e,) - log.WARNING(msg) - def execute(*args): - not_implemented_stub(msg) - - else: - def execute(*args): - # marshal the function name and input arguments - buf = [] - dump_string(buf, fnname) - dump_arguments(buf, args) - # send the buffer and wait for the answer - loader = sandboxed_io(buf) - # decode the answer - result = load_result(loader) - loader.check_finished() - return result - execute = func_with_new_name(execute, 'sandboxed_' + fnname) - - ann = MixLevelHelperAnnotator(db.translator.rtyper) - graph = ann.getgraph(execute, args_s, s_result) +def _annotate(rtyper, f, args_s, s_result): + ann = MixLevelHelperAnnotator(rtyper) + graph = ann.getgraph(f, args_s, s_result) ann.finish() return graph From pypy.commits at gmail.com Tue Jan 12 12:44:23 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 12 Jan 2016 09:44:23 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Split get_external_function_sandbox_graph() and kill force_stub flag Message-ID: <56953b77.2815c20a.d8200.fffff65b@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81686:eddfb3e100dd Date: 2016-01-12 17:43 +0000 http://bitbucket.org/pypy/pypy/changeset/eddfb3e100dd/ Log: Split get_external_function_sandbox_graph() and kill force_stub flag diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -916,15 +916,15 @@ def sandbox_stub(fnobj, db): # unexpected external function for --sandbox translation: replace it # with a "Not Implemented" stub. - graph = rsandbox.get_external_function_sandbox_graph(fnobj, db, - force_stub=True) + graph = rsandbox.get_sandbox_stub(fnobj, db.translator.rtyper) return make_funcgen(graph, db) def sandbox_transform(fnobj, db): # for --sandbox: replace a function like os_open_llimpl() with # code that communicates with the external process to ask it to # perform the operation. - graph = rsandbox.get_external_function_sandbox_graph(fnobj, db) + graph = rsandbox.get_external_function_sandbox_graph( + fnobj, db.translator.rtyper) return make_funcgen(graph, db) def need_sandboxing(fnobj): diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -108,10 +108,24 @@ execute.__name__ = 'sandboxed_%s' % (fnname,) return execute +def sig_ll(fnobj): + FUNCTYPE = lltype.typeOf(fnobj) + args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] + s_result = lltype_to_annotation(FUNCTYPE.RESULT) + return args_s, s_result + dump_string = rmarshal.get_marshaller(str) load_int = rmarshal.get_loader(int) -def get_external_function_sandbox_graph(fnobj, db, force_stub=False): +def get_sandbox_stub(fnobj, rtyper): + """Build always-raising graph for unsupported external function.""" + fnname = fnobj._name + args_s, s_result = sig_ll(fnobj) + msg = "Not implemented: sandboxing for external function '%s'" % (fnname,) + execute = make_stub(fnname, msg) + return _annotate(rtyper, execute, args_s, s_result) + +def get_external_function_sandbox_graph(fnobj, rtyper): """Build the graph of a helper trampoline function to be used in place of real calls to the external function 'fnobj'. The trampoline marshals its input arguments, dumps them to STDOUT, @@ -129,34 +143,28 @@ else: # pure external function - fall back to the annotations # corresponding to the ll types - FUNCTYPE = lltype.typeOf(fnobj) - args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] - s_result = lltype_to_annotation(FUNCTYPE.RESULT) + args_s, s_result = sig_ll(fnobj) - if force_stub: # old case - don't try to support suggested_primitive - msg = "Not implemented: sandboxing for external function '%s'" % (fnname,) + try: + dump_arguments = rmarshal.get_marshaller(tuple(args_s)) + load_result = rmarshal.get_loader(s_result) + except (rmarshal.CannotMarshal, rmarshal.CannotUnmarshall) as e: + msg = "Cannot sandbox function '%s': %s" % (fnname, e) execute = make_stub(fnname, msg) else: - try: - dump_arguments = rmarshal.get_marshaller(tuple(args_s)) - load_result = rmarshal.get_loader(s_result) - except (rmarshal.CannotMarshal, rmarshal.CannotUnmarshall) as e: - msg = "Cannot sandbox function '%s': %s" % (fnname, e) - execute = make_stub(fnname, msg) - else: - def execute(*args): - # marshal the function name and input arguments - buf = [] - dump_string(buf, fnname) - dump_arguments(buf, args) - # send the buffer and wait for the answer - loader = sandboxed_io(buf) - # decode the answer - result = load_result(loader) - loader.check_finished() - return result - execute.__name__ = 'sandboxed_%s' % (fnname,) - return _annotate(db.translator.rtyper, execute, args_s, s_result) + def execute(*args): + # marshal the function name and input arguments + buf = [] + dump_string(buf, fnname) + dump_arguments(buf, args) + # send the buffer and wait for the answer + loader = sandboxed_io(buf) + # decode the answer + result = load_result(loader) + loader.check_finished() + return result + execute.__name__ = 'sandboxed_%s' % (fnname,) + return _annotate(rtyper, execute, args_s, s_result) def _annotate(rtyper, f, args_s, s_result): ann = MixLevelHelperAnnotator(rtyper) From pypy.commits at gmail.com Tue Jan 12 12:55:04 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 09:55:04 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: improve test, but it is failing under windows Message-ID: <56953df8.84e31c0a.1d455.5d8d@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2570:4d841e4f5175 Date: 2016-01-12 18:54 +0100 http://bitbucket.org/cffi/cffi/changeset/4d841e4f5175/ Log: improve test, but it is failing under windows diff --git a/testing/embedding/tlocal-test.c b/testing/embedding/tlocal-test.c --- a/testing/embedding/tlocal-test.c +++ b/testing/embedding/tlocal-test.c @@ -18,8 +18,8 @@ assert((expected % 1000) == 42); for (i=0; i<10; i++) { - x = add1(40, 2); - assert(x == expected); + x = add1(50, i); + assert(x == expected + 8 + i); } status = sem_post(&done); From pypy.commits at gmail.com Tue Jan 12 12:56:45 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 12 Jan 2016 09:56:45 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed another 3 tests (translated call release gil) Message-ID: <56953e5d.624fc20a.13d6d.4257@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81687:2504ba34aacd Date: 2016-01-12 18:56 +0100 http://bitbucket.org/pypy/pypy/changeset/2504ba34aacd/ Log: fixed another 3 tests (translated call release gil) diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -84,6 +84,7 @@ # t = TranslationContext() t.config.translation.gc = gc + t.config.translation.lldebug = True # XXX debug if gc != 'boehm': t.config.translation.gcremovetypeptr = True for name, value in kwds.items(): diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -556,14 +556,21 @@ mc = self.mc mc.load_imm(r.SCRATCH, endaddr) # li r0, endaddr - mc.load(r.SCRATCH2, r.SCRATCH, 0) # lg r1, [end] - mc.load(r.SCRATCH, r.SCRATCH, diff)# lg r0, [length] - mc.SGR(r.SCRATCH2, r.SP) # sub r1, SP + mc.load(r.r14, r.SCRATCH, 0) # lg r14, [end] + mc.load(r.SCRATCH, r.SCRATCH, diff) # lg r0, [length] + mc.LGR(r.SCRATCH2, r.SP) + mc.SGR(r.SCRATCH2, r.r14) # sub r1, (SP - r14) + jmp_pos = self.mc.currpos() + self.mc.reserve_cond_jump() + mc.load_imm(r.r14, self.stack_check_slowpath) - off = l.imm(mc.CLGRJ_byte_count + mc.BASR_byte_count) - mc.CLGRJ(r.SCRATCH2, r.SCRATCH, c.GT, off) mc.BASR(r.r14, r.r14) + currpos = self.mc.currpos() + pmc = OverwritingBuilder(mc, jmp_pos, 1) + pmc.CLGRJ(r.SCRATCH2, r.SCRATCH, c.GT, l.imm(currpos - jmp_pos)) + pmc.overwrite() + def _check_frame_depth(self, mc, gcmap): """ check if the frame is of enough depth to follow this bridge. Otherwise reallocate the frame in a helper. diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -127,7 +127,7 @@ # in this mode, RSHADOWOLD happens to contain the shadowstack # top at this point, so reuse it instead of loading it again # RSHADOWOLD is moved to the scratch reg just before restoring r8 - ssreg = r.SCRATCH + ssreg = None # r.SCRATCH self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) def emit_raw_call(self): @@ -135,15 +135,12 @@ # save the SP back chain self.mc.STG(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) # move the frame pointer - self.mc.AGHI(r.SP, l.imm(-self.subtracted_to_sp)) + self.mc.LAY(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) self.mc.raw_call() - # restore the pool! - offset = self.asm.pool.pool_start - self.mc.get_relative_pos() - self.mc.LARL(r.POOL, l.halfword(offset)) def restore_stack_pointer(self): if self.subtracted_to_sp != 0: - self.mc.AGHI(r.SP, l.imm(self.subtracted_to_sp)) + self.mc.LAY(r.SP, l.addr(self.subtracted_to_sp, r.SP)) def load_result(self): assert (self.resloc is None or @@ -246,7 +243,7 @@ pmc.BRCL(c.EQ, l.imm(self.mc.currpos() - b1_location)) pmc.overwrite() - # restore the values that might have been overwritten + # restore the values that is void after LMG if gcrootmap: if gcrootmap.is_shadow_stack and self.is_call_release_gil: self.mc.LGR(r.SCRATCH, RSHADOWOLD) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -393,6 +393,13 @@ r = reg + # 2-6 + self.pushpop_jitframe([r.r2, r.r3, r.r4, r.r5, r.r6, r.r8, r.r10]) + assert stored == [(r.r2, r.r6), (r.r8,), (r.r10,)] + assert stored == loaded + stored = [] + loaded = [] + # two sequences 10-11, 13-14 self.pushpop_jitframe([r.r10, r.r11, r.r13, r.r14]) assert stored == [(r.r10, r.r11), (r.r13, r.r14)] diff --git a/rpython/jit/backend/zarch/tool/__init__.py b/rpython/jit/backend/zarch/tool/__init__.py new file mode 100644 diff --git a/rpython/jit/backend/zarch/tool/viewcode.py b/rpython/jit/backend/zarch/tool/viewcode.py new file mode 100755 --- /dev/null +++ b/rpython/jit/backend/zarch/tool/viewcode.py @@ -0,0 +1,427 @@ +#! /usr/bin/env python +""" +Viewer for the output of compiled programs generating code. +Use on the log files created with 'PYPYLOG=jit-backend-dump:log'. + +Try: + ./viewcode.py --text log # text only disassembly + ./viewcode.py log # also includes a pygame viewer +""" + +import sys +print(sys.path) +import new +import operator +import py +import re +import subprocess +from bisect import bisect_left + +# don't use pypy.tool.udir here to avoid removing old usessions which +# might still contain interesting executables +udir = py.path.local.make_numbered_dir(prefix='viewcode-', keep=2) +tmpfile = str(udir.join('dump.tmp')) + +# hack hack +import rpython.tool +mod = new.module('rpython.tool.udir') +mod.udir = udir +sys.modules['rpython.tool.udir'] = mod +rpython.tool.udir = mod + +# ____________________________________________________________ +# Some support code from Psyco. There is more over there, +# I am porting it in a lazy fashion... See py-utils/xam.py + +def machine_code_dump(data, originaddr, backend_name, label_list=None): + objdump = ('objdump -EB --target=binary --architecture=s390:64-bit ' + '--adjust-vma=%(origin)d -D %(file)s') + # + f = open(tmpfile, 'wb') + f.write(data) + f.close() + p = subprocess.Popen(objdump % { + 'file': tmpfile, + 'origin': originaddr, + }, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + assert not p.returncode, ('Encountered an error running objdump: %s' % + stderr) + # drop some objdump cruft + lines = stdout.splitlines(True)[6:] # drop some objdump cruft + return format_code_dump_with_labels(originaddr, lines, label_list) + +def format_code_dump_with_labels(originaddr, lines, label_list): + from rpython.rlib.rarithmetic import r_uint + if not label_list: + label_list = [] + originaddr = r_uint(originaddr) + itlines = iter(lines) + yield itlines.next() # don't process the first line + for lbl_start, lbl_name in label_list: + for line in itlines: + addr, _ = line.split(':', 1) + addr = int(addr, 16) + if addr >= originaddr+lbl_start: + yield '\n' + if lbl_name is None: + yield '--end of the loop--\n' + else: + yield str(lbl_name) + '\n' + yield line + break + yield line + # yield all the remaining lines + for line in itlines: + yield line + +def load_symbols(filename): + # the program that lists symbols, and the output it gives + symbollister = 'nm %s' + re_symbolentry = re.compile(r'([0-9a-fA-F]+)\s\w\s(.*)') + # + print 'loading symbols from %s...' % (filename,) + symbols = {} + p = subprocess.Popen(symbollister % filename, shell=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + assert not p.returncode, ('Encountered an error running nm: %s' % + stderr) + for line in stdout.splitlines(True): + match = re_symbolentry.match(line) + if match: + addr = long(match.group(1), 16) + name = match.group(2) + if name.startswith('pypy_g_'): + name = '\xb7' + name[7:] + symbols[addr] = name + print '%d symbols found' % (len(symbols),) + return symbols + +re_addr = re.compile(r'[\s,$]0x([0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F]+)') +re_lineaddr = re.compile(r'\s*0?x?([0-9a-fA-F]+)') + +def lineaddresses(line): + result = [] + i = 0 + while 1: + match = re_addr.search(line, i) + if not match: + break + i = match.end() + addr = long(match.group(1), 16) + result.append(addr) + return result + +# ____________________________________________________________ + +class CodeRange(object): + fallthrough = False + + def __init__(self, world, addr, data): + self.world = world + self.addr = addr + self.data = data + + def __repr__(self): + return '' % (hex(self.addr), len(self.data)) + + def touches(self, other): + return (self .addr < other.addr + len(other.data) and + other.addr < self .addr + len(self.data)) + + def update_from_old(self, other): + if other.addr < self.addr: + delta = self.addr - other.addr + assert delta <= len(other.data) + self.addr -= delta + self.data = other.data[:delta] + self.data + self_end = self .addr + len(self .data) + other_end = other.addr + len(other.data) + if other_end > self_end: + extra = other_end - self_end + assert extra <= len(other.data) + self.data += other.data[-extra:] + + def cmpop(op): + def _cmp(self, other): + if not isinstance(other, CodeRange): + return NotImplemented + return op((self.addr, self.data), (other.addr, other.data)) + return _cmp + __lt__ = cmpop(operator.lt) + __le__ = cmpop(operator.le) + __eq__ = cmpop(operator.eq) + __ne__ = cmpop(operator.ne) + __gt__ = cmpop(operator.gt) + __ge__ = cmpop(operator.ge) + del cmpop + + def disassemble(self): + if not hasattr(self, 'text'): + lines = machine_code_dump(self.data, self.addr, self.world.backend_name) + lines = list(lines) + # instead of adding symbol names in the dumps we could + # also make the 0xNNNNNNNN addresses be red and show the + # symbol name when the mouse is over them + logentries = self.world.logentries + symbols = self.world.symbols + for i, line in enumerate(lines): + match = re_lineaddr.match(line) + if match: + addr = long(match.group(1), 16) + logentry = logentries.get(addr) + if logentry: + lines[i] = '\n%s\n%s' % (logentry, lines[i]) + for addr in lineaddresses(line): + sym = symbols.get(addr) + if sym: + lines[i] = '%s\t%s\n' % (lines[i].rstrip(), sym) + self.text = ''.join(lines) + return self.text + + def findjumps(self): + text = self.disassemble() + lines = text.splitlines() + line = '' + for i, line in enumerate(lines): + if '\tj' not in line: # poor heuristic to recognize lines that + continue # could be jump instructions + addrs = list(lineaddresses(line)) + if not addrs: + continue + addr = addrs[-1] + final = '\tjmp' in line + yield i, addr, final + if self.fallthrough and '\tret' not in line: + yield len(lines), self.addr + len(self.data), True + + +class World(object): + + def __init__(self): + self.ranges = [] + self.labeltargets = {} + self.jumps = {} + self.symbols = {} + self.logentries = {} + self.backend_name = None + self.executable_name = None + + def parse(self, f, textonly=True): + for line in f: + if line.startswith('BACKEND '): + self.backend_name = line.split(' ')[1].strip() + elif line.startswith('CODE_DUMP '): + pieces = line.split() + assert pieces[1].startswith('@') + assert pieces[2].startswith('+') + if len(pieces) == 3: + continue # empty line + baseaddr = long(pieces[1][1:], 16) & 0xFFFFFFFFL + offset = int(pieces[2][1:]) + addr = baseaddr + offset + data = pieces[3].replace(':', '').decode('hex') + coderange = CodeRange(self, addr, data) + i = bisect_left(self.ranges, coderange) + j = i + while i>0 and coderange.touches(self.ranges[i-1]): + coderange.update_from_old(self.ranges[i-1]) + i -= 1 + while j= fnext: + sys.stderr.write("%d%%" % int(f*100.0)) + fnext += 0.1 + sys.stderr.write(".") + sys.stderr.write("100%") + # split blocks at labeltargets + t = self.labeltargets + #print t + for r in self.ranges: + #print r.addr, r.addr + len(r.data) + for i in range(r.addr + 1, r.addr + len(r.data)): + if i in t: + #print i + ofs = i - r.addr + self.ranges.append(CodeRange(self, i, r.data[ofs:])) + r.data = r.data[:ofs] + r.fallthrough = True + try: + del r.text + except AttributeError: + pass + break + # hack hack hacked + sys.stderr.write("\n") + + def show(self, showtext=True, showgraph=True): + if showgraph: + g1 = Graph('codedump') + self.ranges.sort() + for r in self.ranges: + disassembled = r.disassemble() + if showtext: + print disassembled + if showgraph: + text, width = tab2columns(disassembled) + text = '0x%x\n\n%s' % (r.addr, text) + g1.emit_node('N_%x' % r.addr, shape="box", label=text, + width=str(width*0.1125)) + for lineno, targetaddr, final in r.findjumps(): + if final: + color = "black" + else: + color = "red" + g1.emit_edge('N_%x' % r.addr, 'N_%x' % targetaddr, + color=color) + sys.stdout.flush() + if showgraph: + g1.display() + + def showtextonly(self): + self.ranges.sort() + for r in self.ranges: + disassembled = r.disassemble() + print disassembled + del r.text + + +def tab2columns(text): + lines = text.split('\n') + columnwidth = [] + for line in lines: + columns = line.split('\t')[:-1] + while len(columnwidth) < len(columns): + columnwidth.append(0) + for i, s in enumerate(columns): + width = len(s.strip()) + if not s.endswith(':'): + width += 2 + columnwidth[i] = max(columnwidth[i], width) + columnwidth.append(1) + result = [] + for line in lines: + columns = line.split('\t') + text = [] + for width, s in zip(columnwidth, columns): + text.append(s.strip().ljust(width)) + result.append(' '.join(text)) + lengths = [len(line) for line in result] + lengths.append(1) + totalwidth = max(lengths) + return '\\l'.join(result), totalwidth + +# ____________________________________________________________ +# XXX pasted from +# http://codespeak.net/svn/user/arigo/hack/misc/graphlib.py +# but needs to be a bit more subtle later + +from rpython.translator.tool.make_dot import DotGen +from dotviewer.graphclient import display_page + +class Graph(DotGen): + + def highlight(self, word, text, linked_to=None): + if not hasattr(self, '_links'): + self._links = {} + self._links_to = {} + self._links[word] = text + if linked_to: + self._links_to[word] = linked_to + + def display(self): + "Display a graph page locally." + display_page(_Page(self)) + + +class NoGraph(Exception): + pass + +class _Page: + def __init__(self, graph_builder): + if callable(graph_builder): + graph = graph_builder() + else: + graph = graph_builder + if graph is None: + raise NoGraph + self.graph_builder = graph_builder + + def content(self): + return _PageContent(self.graph_builder) + +class _PageContent: + fixedfont = True + + def __init__(self, graph_builder): + if callable(graph_builder): + graph = graph_builder() + else: + graph = graph_builder + assert graph is not None + self.graph_builder = graph_builder + self.graph = graph + self.links = getattr(graph, '_links', {}) + if not hasattr(graph, '_source'): + graph._source = graph.generate(target=None) + self.source = graph._source + + def followlink(self, link): + try: + return _Page(self.graph._links_to[link]) + except NoGraph: + return _Page(self.graph_builder) + +# ____________________________________________________________ + +if __name__ == '__main__': + if '--text' in sys.argv: + sys.argv.remove('--text') + showgraph = False + else: + showgraph = True + if len(sys.argv) != 2: + print >> sys.stderr, __doc__ + sys.exit(2) + # + import cStringIO + from rpython.tool import logparser + log1 = logparser.parse_log_file(sys.argv[1]) + text1 = logparser.extract_category(log1, catprefix='jit-backend-dump') + f = cStringIO.StringIO() + f.writelines(text1) + f.seek(0) + del log1, text1 + # + world = World() + world.parse(f) + if showgraph: + world.find_cross_references() + world.show(showtext=True) + else: + world.showtextonly() From pypy.commits at gmail.com Tue Jan 12 13:04:33 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 10:04:33 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: hg merge default Message-ID: <56954031.6918c20a.deb73.ffffd25c@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2571:1cc859d4fc3d Date: 2016-01-12 19:03 +0100 http://bitbucket.org/cffi/cffi/changeset/1cc859d4fc3d/ Log: hg merge default diff --git a/c/misc_thread_common.h b/c/misc_thread_common.h new file mode 100644 --- /dev/null +++ b/c/misc_thread_common.h @@ -0,0 +1,136 @@ +#ifndef WITH_THREAD +# error "xxx no-thread configuration not tested, please report if you need that" +#endif + + +struct cffi_tls_s { + /* The locally-made thread state. This is only non-null in case + we build the thread state here. It remains null if this thread + had already a thread state provided by CPython. */ + PyThreadState *local_thread_state; + +#ifndef USE__THREAD + /* The saved errno. If the C compiler supports '__thread', then + we use that instead. */ + int saved_errno; +#endif + +#ifdef MS_WIN32 + /* The saved lasterror, on Windows. */ + int saved_lasterror; +#endif +}; + +static struct cffi_tls_s *get_cffi_tls(void); /* in misc_thread_posix.h + or misc_win32.h */ + +static void cffi_thread_shutdown(void *p) +{ + struct cffi_tls_s *tls = (struct cffi_tls_s *)p; + + if (tls->local_thread_state != NULL) { + /* We need to re-acquire the GIL temporarily to free the + thread state. I hope it is not a problem to do it in + a thread-local destructor. + */ + PyEval_RestoreThread(tls->local_thread_state); + PyThreadState_DeleteCurrent(); + } + free(tls); +} + +/* USE__THREAD is defined by setup.py if it finds that it is + syntactically valid to use "__thread" with this C compiler. */ +#ifdef USE__THREAD + +static __thread int cffi_saved_errno = 0; +static void save_errno_only(void) { cffi_saved_errno = errno; } +static void restore_errno_only(void) { errno = cffi_saved_errno; } + +#else + +static void save_errno_only(void) +{ + int saved = errno; + struct cffi_tls_s *tls = get_cffi_tls(); + if (tls != NULL) + tls->saved_errno = saved; +} + +static void restore_errno_only(void) +{ + struct cffi_tls_s *tls = get_cffi_tls(); + if (tls != NULL) + errno = tls->saved_errno; +} + +#endif + + +/* Seems that CPython 3.5.1 made our job harder. Did not find out how + to do that without these hacks. We can't use PyThreadState_GET(), + because that calls PyThreadState_Get() which fails an assert if the + result is NULL. */ +#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) + /* this was abruptly un-defined in 3.5.1 */ +void *volatile _PyThreadState_Current; + /* XXX simple volatile access is assumed atomic */ +# define _Py_atomic_load_relaxed(pp) (*(pp)) +#endif + +static PyThreadState *get_current_ts(void) +{ +#if PY_MAJOR_VERSION >= 3 + return (PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current); +#else + return _PyThreadState_Current; +#endif +} + +static PyGILState_STATE gil_ensure(void) +{ + /* Called at the start of a callback. Replacement for + PyGILState_Ensure(). + */ + PyGILState_STATE result; + struct cffi_tls_s *tls; + PyThreadState *ts = PyGILState_GetThisThreadState(); + + if (ts != NULL) { + ts->gilstate_counter++; + if (ts != get_current_ts()) { + /* common case: 'ts' is our non-current thread state and + we have to make it current and acquire the GIL */ + PyEval_RestoreThread(ts); + return PyGILState_UNLOCKED; + } + else { + return PyGILState_LOCKED; + } + } + else { + /* no thread state here so far. */ + result = PyGILState_Ensure(); + assert(result == PyGILState_UNLOCKED); + + ts = PyGILState_GetThisThreadState(); + assert(ts != NULL); + assert(ts == get_current_ts()); + assert(ts->gilstate_counter >= 1); + + /* Save the now-current thread state inside our 'local_thread_state' + field, to be removed at thread shutdown */ + tls = get_cffi_tls(); + if (tls != NULL) { + tls->local_thread_state = ts; + ts->gilstate_counter++; + } + + return result; + } +} + +static void gil_release(PyGILState_STATE oldstate) +{ + PyGILState_Release(oldstate); +} diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h --- a/c/misc_thread_posix.h +++ b/c/misc_thread_posix.h @@ -13,41 +13,15 @@ shut down, using a destructor on the tls key. */ -#ifdef WITH_THREAD #include +#include "misc_thread_common.h" static pthread_key_t cffi_tls_key; -struct cffi_tls_s { - /* The locally-made thread state. This is only non-null in case - we build the thread state here. It remains null if this thread - had already a thread state provided by CPython. */ - PyThreadState *local_thread_state; - - /* The saved errno. If the C compiler supports '__thread', then - we use that instead; this value is not used at all in this case. */ - int saved_errno; -}; - -static void _tls_destructor(void *p) -{ - struct cffi_tls_s *tls = (struct cffi_tls_s *)p; - - if (tls->local_thread_state != NULL) { - /* We need to re-acquire the GIL temporarily to free the - thread state. I hope it is not a problem to do it in - a thread-local destructor. - */ - PyEval_RestoreThread(tls->local_thread_state); - PyThreadState_DeleteCurrent(); - } - free(tls); -} - static void init_cffi_tls(void) { - if (pthread_key_create(&cffi_tls_key, _tls_destructor) != 0) + if (pthread_key_create(&cffi_tls_key, &cffi_thread_shutdown) != 0) PyErr_SetString(PyExc_OSError, "pthread_key_create() failed"); } @@ -71,116 +45,5 @@ return (struct cffi_tls_s *)p; } - -/* USE__THREAD is defined by setup.py if it finds that it is - syntactically valid to use "__thread" with this C compiler. */ -#ifdef USE__THREAD - -static __thread int cffi_saved_errno = 0; -static void save_errno(void) { cffi_saved_errno = errno; } -static void restore_errno(void) { errno = cffi_saved_errno; } - -#else - -static void save_errno(void) -{ - int saved = errno; - struct cffi_tls_s *tls = get_cffi_tls(); - if (tls != NULL) - tls->saved_errno = saved; -} - -static void restore_errno(void) -{ - struct cffi_tls_s *tls = get_cffi_tls(); - if (tls != NULL) - errno = tls->saved_errno; -} - -#endif - - -/* Seems that CPython 3.5.1 made our job harder. Did not find out how - to do that without these hacks. We can't use PyThreadState_GET(), - because that calls PyThreadState_Get() which fails an assert if the - result is NULL. */ -#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) - /* this was abruptly un-defined in 3.5.1 */ -void *volatile _PyThreadState_Current; - /* XXX simple volatile access is assumed atomic */ -# define _Py_atomic_load_relaxed(pp) (*(pp)) -#endif - - -static PyThreadState *get_current_ts(void) -{ -#if PY_MAJOR_VERSION >= 3 - return (PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current); -#else - return _PyThreadState_Current; -#endif -} - -static PyGILState_STATE gil_ensure(void) -{ - /* Called at the start of a callback. Replacement for - PyGILState_Ensure(). - */ - PyGILState_STATE result; - struct cffi_tls_s *tls; - PyThreadState *ts = PyGILState_GetThisThreadState(); - - if (ts != NULL) { - ts->gilstate_counter++; - if (ts != get_current_ts()) { - /* common case: 'ts' is our non-current thread state and - we have to make it current and acquire the GIL */ - PyEval_RestoreThread(ts); - return PyGILState_UNLOCKED; - } - else { - return PyGILState_LOCKED; - } - } - else { - /* no thread state here so far. */ - result = PyGILState_Ensure(); - assert(result == PyGILState_UNLOCKED); - - ts = PyGILState_GetThisThreadState(); - assert(ts != NULL); - assert(ts == get_current_ts()); - assert(ts->gilstate_counter >= 1); - - /* Save the now-current thread state inside our 'local_thread_state' - field, to be removed at thread shutdown */ - tls = get_cffi_tls(); - if (tls != NULL) { - tls->local_thread_state = ts; - ts->gilstate_counter++; - } - - return result; - } -} - -static void gil_release(PyGILState_STATE oldstate) -{ - PyGILState_Release(oldstate); -} - - -#else /* !WITH_THREAD */ - -static int cffi_saved_errno = 0; -static void save_errno(void) { cffi_saved_errno = errno; } -static void restore_errno(void) { errno = cffi_saved_errno; } - -static PyGILState_STATE gil_ensure(void) { return -1; } -static void gil_release(PyGILState_STATE oldstate) { } - -#endif /* !WITH_THREAD */ - - -#define save_errno_only save_errno -#define restore_errno_only restore_errno +#define save_errno save_errno_only +#define restore_errno restore_errno_only diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -1,15 +1,37 @@ #include /* for alloca() */ + /************************************************************/ /* errno and GetLastError support */ -struct cffi_errno_s { - int saved_errno; - int saved_lasterror; -}; +#include "misc_thread_common.h" static DWORD cffi_tls_index = TLS_OUT_OF_INDEXES; +BOOL WINAPI DllMain(HINSTANCE hinstDLL, + DWORD reason_for_call, + LPVOID reserved) +{ + LPVOID p; + + switch (reason_for_call) { + + case DLL_THREAD_DETACH: + if (cffi_tls_index != TLS_OUT_OF_INDEXES) { + p = TlsGetValue(cffi_tls_index); + if (p != NULL) { + TlsSetValue(cffi_tls_index, NULL); + cffi_thread_shutdown(p); + } + } + break; + + default: + break; + } + return TRUE; +} + static void init_cffi_tls(void) { if (cffi_tls_index == TLS_OUT_OF_INDEXES) { @@ -19,28 +41,29 @@ } } -static struct cffi_errno_s *_geterrno_object(void) +static struct cffi_tls_s *get_cffi_tls(void) { LPVOID p = TlsGetValue(cffi_tls_index); if (p == NULL) { - /* XXX this malloc() leaks */ - p = malloc(sizeof(struct cffi_errno_s)); + p = malloc(sizeof(struct cffi_tls_s)); if (p == NULL) return NULL; - memset(p, 0, sizeof(struct cffi_errno_s)); + memset(p, 0, sizeof(struct cffi_tls_s)); TlsSetValue(cffi_tls_index, p); } - return (struct cffi_errno_s *)p; + return (struct cffi_tls_s *)p; } +#ifdef USE__THREAD +# error "unexpected USE__THREAD on Windows" +#endif + static void save_errno(void) { int current_err = errno; int current_lasterr = GetLastError(); - struct cffi_errno_s *p; - - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p != NULL) { p->saved_errno = current_err; p->saved_lasterror = current_lasterr; @@ -48,23 +71,9 @@ /* else: cannot report the error */ } -static void save_errno_only(void) -{ - int current_err = errno; - struct cffi_errno_s *p; - - p = _geterrno_object(); - if (p != NULL) { - p->saved_errno = current_err; - } - /* else: cannot report the error */ -} - static void restore_errno(void) { - struct cffi_errno_s *p; - - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p != NULL) { SetLastError(p->saved_lasterror); errno = p->saved_errno; @@ -72,16 +81,8 @@ /* else: cannot report the error */ } -static void restore_errno_only(void) -{ - struct cffi_errno_s *p; +/************************************************************/ - p = _geterrno_object(); - if (p != NULL) { - errno = p->saved_errno; - } - /* else: cannot report the error */ -} #if PY_MAJOR_VERSION >= 3 static PyObject *b_getwinerror(PyObject *self, PyObject *args, PyObject *kwds) @@ -96,8 +97,7 @@ return NULL; if (err == -1) { - struct cffi_errno_s *p; - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p == NULL) return PyErr_NoMemory(); err = p->saved_lasterror; @@ -138,7 +138,7 @@ int len; char *s; char *s_buf = NULL; /* Free via LocalFree */ - char s_small_buf[28]; /* Room for "Windows Error 0xFFFFFFFF" */ + char s_small_buf[40]; /* Room for "Windows Error 0xFFFFFFFFFFFFFFFF" */ PyObject *v; static char *keywords[] = {"code", NULL}; @@ -146,8 +146,7 @@ return NULL; if (err == -1) { - struct cffi_errno_s *p; - p = _geterrno_object(); + struct cffi_tls_s *p = get_cffi_tls(); if (p == NULL) return PyErr_NoMemory(); err = p->saved_lasterror; @@ -183,16 +182,6 @@ #endif -#ifdef WITH_THREAD -/* XXX should port the code from misc_thread_posix.h */ -static PyGILState_STATE gil_ensure(void) { return PyGILState_Ensure(); } -static void gil_release(PyGILState_STATE oldst) { PyGILState_Release(oldst); } -#else -static PyGILState_STATE gil_ensure(void) { return -1; } -static void gil_release(PyGILState_STATE oldstate) { } -#endif - - /************************************************************/ /* Emulate dlopen()&co. from the Windows API */ diff --git a/demo/btrfs-snap.py b/demo/btrfs-snap.py --- a/demo/btrfs-snap.py +++ b/demo/btrfs-snap.py @@ -22,10 +22,14 @@ }; """) -v = ffi.verify("#include ") +ffi.set_source("_btrfs_cffi", "#include ") +ffi.compile() +# ____________________________________________________________ +from _btrfs_cffi import ffi, lib + parser = argparse.ArgumentParser(usage=__doc__.strip()) parser.add_argument('source', help='source subvolume') parser.add_argument('target', help='target directory') @@ -41,7 +45,7 @@ args.fd = source args_buffer = ffi.buffer(args) try: - fcntl.ioctl(target, v.BTRFS_IOC_SNAP_CREATE_V2, args_buffer) + fcntl.ioctl(target, lib.BTRFS_IOC_SNAP_CREATE_V2, args_buffer) except IOError as e: print e sys.exit(1) diff --git a/demo/extern_python_varargs.py b/demo/extern_python_varargs.py new file mode 100644 --- /dev/null +++ b/demo/extern_python_varargs.py @@ -0,0 +1,61 @@ +import cffi + +ffi = cffi.FFI() + +ffi.cdef(""" + int my_algo(int); + typedef ... va_list; + extern "Python" int f(int, va_list *); + + int fetch_int(va_list *); + double fetch_double(va_list *); + void *fetch_ptr(va_list *); +""") + +ffi.set_source("_extern_python_cffi", """ + #include + + static int f(int, va_list *); + + static int f1(int n, ...) + { + va_list ap; + va_start(ap, n); + int res = f(n, &ap); + va_end(ap); + return res; + } + + static int fetch_int(va_list *va) { return va_arg((*va), int); } + static double fetch_double(va_list *va) { return va_arg((*va), double); } + static void * fetch_ptr(va_list *va) { return va_arg((*va), void *); } + + static int my_algo(int n) { + return f1(3, n, n+1, n+2) + f1(1, &n) + f1(2, 12.3, 45.6); + } +""") + +ffi.compile() + + +from _extern_python_cffi import ffi, lib + + at ffi.def_extern() +def f(n, va): + if n == 3: + x = lib.fetch_int(va) + y = lib.fetch_int(va) + z = lib.fetch_int(va) + print (x, y, z) + elif n == 1: + ptr = lib.fetch_ptr(va) + print 'ptr to:', ffi.cast("int *", ptr)[0] + elif n == 2: + x = lib.fetch_double(va) + y = lib.fetch_double(va) + print (x, y) + else: + raise AssertionError(n) + return 14 + +print lib.my_algo(10) diff --git a/demo/fastcsv.py b/demo/fastcsv.py --- a/demo/fastcsv.py +++ b/demo/fastcsv.py @@ -4,9 +4,8 @@ # IN-PROGRESS. See the demo at the end of the file -dialect2ffi = {} - -def _make_ffi_from_dialect(dialect): +def _make_ffi_from_dialect(dialect_name): + dialect = csv.get_dialect(dialect_name) ffi = cffi.FFI() @@ -26,7 +25,7 @@ else: d['is_escape_char'] = '&& 0' - lib = ffi.verify(r''' + ffi.set_source('_fastcsv_' + dialect_name, r''' typedef enum { START_RECORD, START_FIELD, ESCAPED_CHAR, IN_FIELD, @@ -237,15 +236,16 @@ } ''' % d) - return ffi, lib + ffi.compile() -def fastcsv_reader(f, dialect): - dialect = csv.get_dialect(dialect) +def fastcsv_reader(f, dialect_name): try: - ffi, lib = dialect2ffi[dialect] - except KeyError: - ffi, lib = dialect2ffi[dialect] = _make_ffi_from_dialect(dialect) + module = __import__('_fastcsv_' + dialect_name) + except ImportError: + _make_ffi_from_dialect(dialect_name) + module = __import__('_fastcsv_' + dialect_name) + ffi, lib = module.ffi, module.lib # linelen = -1 for line in f: diff --git a/demo/gmp.py b/demo/gmp.py --- a/demo/gmp.py +++ b/demo/gmp.py @@ -1,33 +1,30 @@ import sys -import cffi - # # This is only a demo based on the GMP library. -# There is a rather more complete version available at: +# There is a rather more complete (but perhaps outdated) version available at: # http://bazaar.launchpad.net/~tolot-solar-empire/+junk/gmpy_cffi/files # -ffi = cffi.FFI() +try: + from _gmp_cffi import ffi, lib +except ImportError: + print 'run gmp_build first, then make sure the shared object is on sys.path' + sys.exit(1) -ffi.cdef(""" - - typedef struct { ...; } MP_INT; - typedef MP_INT mpz_t[1]; - - int mpz_init_set_str (MP_INT *dest_integer, char *src_cstring, int base); - void mpz_add (MP_INT *sum, MP_INT *addend1, MP_INT *addend2); - char * mpz_get_str (char *string, int base, MP_INT *integer); - -""") - -lib = ffi.verify("#include ", - libraries=['gmp', 'm']) +# ffi "knows" about the declared variables and functions from the +# cdef parts of the module created from gmp_build +# lib "knows" how to call the functions from the set_source parts +# of the module. # ____________________________________________________________ a = ffi.new("mpz_t") b = ffi.new("mpz_t") +if len(sys.argv) < 3: + print 'call as %s bigint1, bigint2' % sys.argv[0] + sys.exit(2) + lib.mpz_init_set_str(a, sys.argv[1], 10) # Assume decimal integers lib.mpz_init_set_str(b, sys.argv[2], 10) # Assume decimal integers lib.mpz_add(a, a, b) # a=a+b diff --git a/demo/gmp_build.py b/demo/gmp_build.py new file mode 100644 --- /dev/null +++ b/demo/gmp_build.py @@ -0,0 +1,27 @@ +import cffi + +# +# This is only a demo based on the GMP library. +# There is a rather more complete (but perhaps outdated) version available at: +# http://bazaar.launchpad.net/~tolot-solar-empire/+junk/gmpy_cffi/files +# + +ffi = cffi.FFI() + +ffi.cdef(""" + + typedef struct { ...; } MP_INT; + typedef MP_INT mpz_t[1]; + + int mpz_init_set_str (MP_INT *dest_integer, char *src_cstring, int base); + void mpz_add (MP_INT *sum, MP_INT *addend1, MP_INT *addend2); + char * mpz_get_str (char *string, int base, MP_INT *integer); + +""") + +ffi.set_source('_gmp_cffi', "#include ", + libraries=['gmp', 'm']) + +if __name__ == '__main__': + ffi.compile() + diff --git a/demo/pwuid.py b/demo/pwuid.py --- a/demo/pwuid.py +++ b/demo/pwuid.py @@ -1,14 +1,7 @@ -from cffi import FFI -ffi = FFI() -ffi.cdef(""" // some declarations from the man page - struct passwd { - char *pw_name; - ...; - }; - struct passwd *getpwuid(int uid); -""") -C = ffi.verify(""" // passed to the real C compiler -#include -#include -""") -print ffi.string(C.getpwuid(0).pw_name) +import sys, os + +# run pwuid_build first, then make sure the shared object is on sys.path +from _pwuid_cffi import ffi, lib + + +print ffi.string(lib.getpwuid(0).pw_name) diff --git a/demo/pwuid_build.py b/demo/pwuid_build.py new file mode 100644 --- /dev/null +++ b/demo/pwuid_build.py @@ -0,0 +1,18 @@ +from cffi import FFI +ffi = FFI() +ffi.cdef(""" // some declarations from the man page + struct passwd { + char *pw_name; + ...; + }; + struct passwd *getpwuid(int uid); +""") + +ffi.set_source('_pwuid_cffi', """ // passed to the real C compiler +#include +#include +""") + + +if __name__ == '__main__': + ffi.compile() diff --git a/demo/readdir2.py b/demo/readdir2.py --- a/demo/readdir2.py +++ b/demo/readdir2.py @@ -1,11 +1,13 @@ -# A Linux-only demo, using verify() instead of hard-coding the exact layouts +# A Linux-only demo, using set_source() instead of hard-coding the exact layouts # import sys -from _readdir2 import ffi, lib if not sys.platform.startswith('linux'): raise Exception("Linux-only demo") +# run readdir2_build first, then make sure the shared object is on sys.path +from _readdir2_cffi import ffi, lib + def walk(basefd, path): print '{', path diff --git a/demo/readdir2_build.py b/demo/readdir2_build.py --- a/demo/readdir2_build.py +++ b/demo/readdir2_build.py @@ -20,7 +20,7 @@ static const int DT_DIR; """) -ffi.set_source("_readdir2", """ +ffi.set_source("_readdir2_cffi", """ #ifndef _ATFILE_SOURCE # define _ATFILE_SOURCE #endif diff --git a/demo/winclipboard.py b/demo/winclipboard.py --- a/demo/winclipboard.py +++ b/demo/winclipboard.py @@ -1,60 +1,40 @@ __author__ = "Israel Fruchter " -from cffi import FFI +import sys, os -ffi = FFI() -ffi.cdef(''' - typedef void * HANDLE; - typedef HANDLE HWND; - typedef int BOOL; - typedef unsigned int UINT; - typedef int SIZE_T; - typedef char * LPTSTR; - typedef HANDLE HGLOBAL; - typedef HANDLE LPVOID; +if not sys.platform == 'win32': + raise Exception("Windows-only demo") - HWND GetConsoleWindow(void); +try: + from _winclipboard_cffi import ffi, lib +except ImportError: + print 'run winclipboard_build first, then make sure the shared object is on sys.path' + sys.exit(1) - LPVOID GlobalLock( HGLOBAL hMem ); - BOOL GlobalUnlock( HGLOBAL hMem ); - HGLOBAL GlobalAlloc(UINT uFlags, SIZE_T dwBytes); - - BOOL OpenClipboard(HWND hWndNewOwner); - BOOL CloseClipboard(void); - BOOL EmptyClipboard(void); - HANDLE SetClipboardData(UINT uFormat, HANDLE hMem); - - #define CF_TEXT ... - #define GMEM_MOVEABLE ... - - void * memcpy(void * s1, void * s2, int n); - ''') - -lib = ffi.verify(''' - #include -''', libraries=["user32"]) - -globals().update(lib.__dict__) +# ffi "knows" about the declared variables and functions from the +# cdef parts of the module _winclipboard_cffi created, +# lib "knows" how to call the functions from the set_source parts +# of the module. def CopyToClipboard(string): ''' use win32 api to copy `string` to the clipboard ''' - hWnd = GetConsoleWindow() + hWnd = lib.GetConsoleWindow() - if OpenClipboard(hWnd): + if lib.OpenClipboard(hWnd): cstring = ffi.new("char[]", string) size = ffi.sizeof(cstring) # make it a moveable memory for other processes - hGlobal = GlobalAlloc(GMEM_MOVEABLE, size) - buffer = GlobalLock(hGlobal) - memcpy(buffer, cstring, size) - GlobalUnlock(hGlobal) + hGlobal = lib.GlobalAlloc(lib.GMEM_MOVEABLE, size) + buffer = lib.GlobalLock(hGlobal) + lib.memcpy(buffer, cstring, size) + lib.GlobalUnlock(hGlobal) - res = EmptyClipboard() - res = SetClipboardData(CF_TEXT, buffer) + res = lib.EmptyClipboard() + res = lib.SetClipboardData(lib.CF_TEXT, buffer) - CloseClipboard() + lib.CloseClipboard() CopyToClipboard("hello world from cffi") diff --git a/demo/winclipboard_build.py b/demo/winclipboard_build.py new file mode 100644 --- /dev/null +++ b/demo/winclipboard_build.py @@ -0,0 +1,36 @@ +from cffi import FFI + +ffi = FFI() +ffi.cdef(''' + typedef void * HANDLE; + typedef HANDLE HWND; + typedef int BOOL; + typedef unsigned int UINT; + typedef int SIZE_T; + typedef char * LPTSTR; + typedef HANDLE HGLOBAL; + typedef HANDLE LPVOID; + + HWND GetConsoleWindow(void); + + LPVOID GlobalLock( HGLOBAL hMem ); + BOOL GlobalUnlock( HGLOBAL hMem ); + HGLOBAL GlobalAlloc(UINT uFlags, SIZE_T dwBytes); + + BOOL OpenClipboard(HWND hWndNewOwner); + BOOL CloseClipboard(void); + BOOL EmptyClipboard(void); + HANDLE SetClipboardData(UINT uFormat, HANDLE hMem); + + #define CF_TEXT ... + #define GMEM_MOVEABLE ... + + void * memcpy(void * s1, void * s2, int n); + ''') + +ffi.set_source('_winclipboard_cffi', ''' + #include +''', libraries=["user32"]) + +if __name__ == '__main__': + ffi.compile() diff --git a/demo/xclient.py b/demo/xclient.py --- a/demo/xclient.py +++ b/demo/xclient.py @@ -1,40 +1,27 @@ -from cffi import FFI +import sys, os -ffi = FFI() -ffi.cdef(""" +# run xclient_build first, then make sure the shared object is on sys.path +from _xclient_cffi import ffi, lib -typedef ... Display; -typedef struct { ...; } Window; -typedef struct { int type; ...; } XEvent; +# ffi "knows" about the declared variables and functions from the +# cdef parts of the module xclient_build created, +# lib "knows" how to call the functions from the set_source parts +# of the module. -Display *XOpenDisplay(char *display_name); -Window DefaultRootWindow(Display *display); -int XMapRaised(Display *display, Window w); -Window XCreateSimpleWindow(Display *display, Window parent, int x, int y, - unsigned int width, unsigned int height, - unsigned int border_width, unsigned long border, - unsigned long background); -int XNextEvent(Display *display, XEvent *event_return); -""") -lib = ffi.verify(""" -#include -""", libraries=['X11']) - -globals().update(lib.__dict__) class XError(Exception): pass def main(): - display = XOpenDisplay(ffi.NULL) + display = lib.XOpenDisplay(ffi.NULL) if display == ffi.NULL: raise XError("cannot open display") - w = XCreateSimpleWindow(display, DefaultRootWindow(display), + w = lib.XCreateSimpleWindow(display, lib.DefaultRootWindow(display), 10, 10, 500, 350, 0, 0, 0) - XMapRaised(display, w) + lib.XMapRaised(display, w) event = ffi.new("XEvent *") - XNextEvent(display, event) + lib.XNextEvent(display, event) if __name__ == '__main__': main() diff --git a/demo/xclient_build.py b/demo/xclient_build.py new file mode 100644 --- /dev/null +++ b/demo/xclient_build.py @@ -0,0 +1,25 @@ +from cffi import FFI +ffi = FFI() +ffi.cdef(""" + +typedef ... Display; +typedef struct { ...; } Window; + +typedef struct { int type; ...; } XEvent; + +Display *XOpenDisplay(char *display_name); +Window DefaultRootWindow(Display *display); +int XMapRaised(Display *display, Window w); +Window XCreateSimpleWindow(Display *display, Window parent, int x, int y, + unsigned int width, unsigned int height, + unsigned int border_width, unsigned long border, + unsigned long background); +int XNextEvent(Display *display, XEvent *event_return); +""") + +ffi.set_source('_xclient_cffi', """ + #include +""", libraries=['X11']) + +if __name__ == '__main__': + ffi.compile() diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -476,7 +476,7 @@ ``@ffi.def_extern()``. The ``@ffi.def_extern()`` decorator should be applied to a global -function, once. This is because each function from the cdef with +function, but *only once.* This is because each function from the cdef with ``extern "Python"`` turns into only one C function. To support some corner cases, it is possible to redefine the attached Python function by calling ``@ffi.def_extern()`` again---but this is not recommended! @@ -616,7 +616,10 @@ } The ``extern "Python"`` functions cannot be variadic for now. This -may be implemented in the future. +may be implemented in the future. (`This demo`__ shows how to do it +anyway, but it is a bit lengthy.) + +.. __: https://bitbucket.org/cffi/cffi/src/default/demo/extern_python_varargs.py Each corresponding Python callback function is defined with the ``@ffi.def_extern()`` decorator. Be careful when writing this diff --git a/testing/cffi1/test_new_ffi_1.py b/testing/cffi1/test_new_ffi_1.py --- a/testing/cffi1/test_new_ffi_1.py +++ b/testing/cffi1/test_new_ffi_1.py @@ -1718,3 +1718,10 @@ exec("from _test_import_from_lib.lib import *", d) assert (set(key for key in d if not key.startswith('_')) == set(['myfunc', 'MYFOO'])) + # + # also test "import *" on the module itself, which should be + # equivalent to "import ffi, lib" + d = {} + exec("from _test_import_from_lib import *", d) + assert (sorted([x for x in d.keys() if not x.startswith('__')]) == + ['ffi', 'lib']) From pypy.commits at gmail.com Tue Jan 12 13:36:17 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 10:36:17 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Ups, found the bug Message-ID: <569547a1.e906c20a.ca6ee.1c7a@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2572:e294dc8697d0 Date: 2016-01-12 19:36 +0100 http://bitbucket.org/cffi/cffi/changeset/e294dc8697d0/ Log: Ups, found the bug diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -44,7 +44,8 @@ #else /* --- Windows threads version --- */ # include -# define cffi_compare_and_swap(l,o,n) InterlockedCompareExchangePointer(l,n,o) +# define cffi_compare_and_swap(l,o,n) \ + (InterlockedCompareExchangePointer(l,n,o) == (o)) # define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) # define cffi_read_barrier() (void)0 static volatile LONG _cffi_dummy; @@ -62,7 +63,7 @@ static void _cffi_acquire_reentrant_mutex(void) { - static volatile void *lock = NULL; + static void *volatile lock = NULL; while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { /* should ideally do a spin loop instruction here, but From pypy.commits at gmail.com Tue Jan 12 14:01:37 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 12 Jan 2016 11:01:37 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Kill trivial method funcgen.name() Message-ID: <56954d91.c9ebc20a.ee756.ffffc26a@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81688:84e61b90451b Date: 2016-01-12 19:00 +0000 http://bitbucket.org/pypy/pypy/changeset/84e61b90451b/ Log: Kill trivial method funcgen.name() diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -83,9 +83,6 @@ seen[v] = True self.vars = uniquemix - def name(self, cname): #virtual - return cname - def implementation_begin(self): SSI_to_SSA(self.graph) self.collect_var_and_types() @@ -219,8 +216,6 @@ yield '}' link = block.exits[0] assert link.exitcase in (False, True) - #yield 'assert(%s == %s);' % (self.expr(block.exitswitch), - # self.genc.nameofvalue(link.exitcase, ct)) for op in self.gen_link(link): yield op elif TYPE in (Signed, Unsigned, SignedLongLong, diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -853,8 +853,7 @@ if self.funcgen: yield '%s;' % ( forward_cdecl(self.implementationtypename, - self.funcgen.name(self.name), self.db.standalone, - is_exported=is_exported)) + self.name, self.db.standalone, is_exported=is_exported)) def implementation(self): if self.funcgen: @@ -871,7 +870,7 @@ # recompute implementationtypename as the argnames may have changed argnames = funcgen.argnames() implementationtypename = self.db.gettype(self.T, argnames=argnames) - yield '%s {' % cdecl(implementationtypename, funcgen.name(self.name)) + yield '%s {' % cdecl(implementationtypename, self.name) # # declare the local variables # From pypy.commits at gmail.com Tue Jan 12 14:30:32 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 12 Jan 2016 11:30:32 -0800 (PST) Subject: [pypy-commit] buildbot default: HACK: don't upload branch names any more. see discussion on #pypy. Message-ID: <56955458.2815c20a.d8200.2038@mx.google.com> Author: Armin Rigo Branch: Changeset: r981:1ffe17a64350 Date: 2016-01-12 20:30 +0100 http://bitbucket.org/pypy/buildbot/changeset/1ffe17a64350/ Log: HACK: don't upload branch names any more. see discussion on #pypy. not sure it works, but the idea is to allow any branch to be displayed, even if it means they'll all end up in the same graphs. diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -809,14 +809,18 @@ '--upload-executable', 'pypy-c' + postfix, '--upload-project', 'PyPy', '--revision', WithProperties('%(got_revision)s'), - '--branch', WithProperties('%(branch)s'), + # HACK: branches are not uploaded any more, so that + # codespeed will display it, even if not "default" + #'--branch', WithProperties('%(branch)s'), '--upload-urls', 'http://speed.pypy.org/', '--upload-baseline', '--upload-baseline-executable', 'pypy-c-jit' + postfix, '--upload-baseline-project', 'PyPy', '--upload-baseline-revision', WithProperties('%(got_revision)s'), - '--upload-baseline-branch', WithProperties('%(branch)s'), + # HACK: branches are not uploaded any more, so that + # codespeed will display it, even if not "default" + #'--upload-baseline-branch', WithProperties('%(branch)s'), '--upload-baseline-urls', 'http://speed.pypy.org/', ], workdir='./benchmarks', From pypy.commits at gmail.com Tue Jan 12 14:40:43 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 12 Jan 2016 11:40:43 -0800 (PST) Subject: [pypy-commit] pypy repeatlist_strategy: Add support to skip tests if some list strategies are available in the interpreter Message-ID: <569556bb.41dfc20a.752aa.ffffc25c@mx.google.com> Author: Vincent Legoll Branch: repeatlist_strategy Changeset: r81689:c8c3db337ed7 Date: 2016-01-12 16:52 +0100 http://bitbucket.org/pypy/pypy/changeset/c8c3db337ed7/ Log: Add support to skip tests if some list strategies are available in the interpreter Use that to skip tests obsoleted by RepeatListStrategy diff --git a/lib-python/2.7/test/seq_tests.py b/lib-python/2.7/test/seq_tests.py --- a/lib-python/2.7/test/seq_tests.py +++ b/lib-python/2.7/test/seq_tests.py @@ -5,6 +5,8 @@ import unittest import sys +from test import test_support + # Various iterables # This is used for checking the constructor (here and in test_deque.py) def iterfunc(seqn): @@ -305,6 +307,8 @@ self.assertEqual(self.type2test(s)*(-4), self.type2test([])) self.assertEqual(id(s), id(s*1)) + @unittest.skipIf(test_support.list_strategy('RepeatListStrategy'), + "This test is obsoleted by RepeatListStrategy") def test_bigrepeat(self): import sys # we chose an N such as 2**16 * N does not fit into a cpu word diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -1366,6 +1366,32 @@ return guards.get(platform.python_implementation().lower(), default) # ---------------------------------- +# List strategies can make tests wrong +# This helper makes it easy to skip those + +def list_strategy(st=None): + try: + import __pypy__ + # This will return 'object' if pypy is translated without + # list strategies enabled + if 'list_strategy' in dir(__pypy__): + # Python 2.7.3 (2.2.1+dfsg-1ubuntu0.3, Sep 30 2015, 15:18:40) + # [PyPy 2.2.1 with GCC 4.8.4] + strategy = __pypy__.list_strategy + expected = 'empty' + elif 'strategy' in dir(__pypy__): + # Python 2.7.10 (71b4bf53487c, Jan 05 2016, 23:00:18) + # [PyPy 4.1.0-alpha0 with GCC 4.8.4] + strategy = __pypy__.strategy + expected = 'EmptyListStrategy' + if strategy([]) == expected: + if st is None or strategy([None] * 2) == st: + return True + except ImportError: + pass + return False + +# ---------------------------------- # PyPy extension: you can run:: # python ..../test_foo.py --pdb # to get a pdb prompt in case of exceptions From pypy.commits at gmail.com Tue Jan 12 17:51:47 2016 From: pypy.commits at gmail.com (mjacob) Date: Tue, 12 Jan 2016 14:51:47 -0800 (PST) Subject: [pypy-commit] pypy default: Let GraphAnalyzer return a conservative result if it encounters a function object which is neither external nor has a graph attached. Message-ID: <56958383.84c9c20a.2d630.4a7a@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81690:1984c2224199 Date: 2016-01-12 23:50 +0100 http://bitbucket.org/pypy/pypy/changeset/1984c2224199/ Log: Let GraphAnalyzer return a conservative result if it encounters a function object which is neither external nor has a graph attached. diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -90,7 +90,10 @@ if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x - graph = funcobj.graph + try: + graph = funcobj.graph + except AttributeError: + return self.top_result() assert graph is not None x = self.analyze_direct_call(graph, seen) if self.verbose and x: From pypy.commits at gmail.com Tue Jan 12 18:39:36 2016 From: pypy.commits at gmail.com (mjacob) Date: Tue, 12 Jan 2016 15:39:36 -0800 (PST) Subject: [pypy-commit] pypy default: Clean up after 58ef780a3875. Message-ID: <56958eb8.c9ebc20a.ee756.13ad@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81692:992f9611a8de Date: 2016-01-13 00:38 +0100 http://bitbucket.org/pypy/pypy/changeset/992f9611a8de/ Log: Clean up after 58ef780a3875. When committing 58ef780a3875, I unintentionally replaced the call of `get_graph()` by `funcobj.graph`. After 419c89606228 this does exactly the same, so I decided to clean up by removing the now- unused import and a superfluous assertion. diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,5 +1,4 @@ from rpython.rtyper.lltypesystem.lltype import DelayedPointer -from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -94,7 +93,6 @@ graph = funcobj.graph except AttributeError: return self.top_result() - assert graph is not None x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) From pypy.commits at gmail.com Tue Jan 12 18:39:34 2016 From: pypy.commits at gmail.com (mjacob) Date: Tue, 12 Jan 2016 15:39:34 -0800 (PST) Subject: [pypy-commit] pypy default: Cleanup get_graph() function. Message-ID: <56958eb6.6a69c20a.d649d.ffffc6ee@mx.google.com> Author: Manuel Jacob Branch: Changeset: r81691:419c89606228 Date: 2016-01-13 00:31 +0100 http://bitbucket.org/pypy/pypy/changeset/419c89606228/ Log: Cleanup get_graph() function. 1) Remove last try block, which was dead. 2) Remove getting funcobj._callable. The `callable` variable was unused after 1). In case you wonder whether there might have been a bug before: AFAIK translator._graphof(funcobj._callable) can't return something if funcobj.graph is not present. diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -24,22 +24,13 @@ if not isinstance(f, lltype._ptr): return None try: - funcobj = f._getobj() + funcobj = f._obj except lltype.DelayedPointer: return None try: - callable = funcobj._callable - except (AttributeError, KeyError, AssertionError): - return None - try: return funcobj.graph except AttributeError: return None - try: - callable = funcobj._callable - return translator._graphof(callable) - except (AttributeError, KeyError, AssertionError): - return None def replace_exitswitch_by_constant(block, const): From pypy.commits at gmail.com Tue Jan 12 22:23:43 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:43 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix tests in stderrprinter and io. Message-ID: <5695c33f.aa5dc20a.74c0.ffffb126@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81695:260044caf4ee Date: 2015-12-15 12:49 -0500 http://bitbucket.org/pypy/pypy/changeset/260044caf4ee/ Log: Fix tests in stderrprinter and io. diff --git a/pypy/module/__pypy__/test/test_stderrprinter.py b/pypy/module/__pypy__/test/test_stderrprinter.py --- a/pypy/module/__pypy__/test/test_stderrprinter.py +++ b/pypy/module/__pypy__/test/test_stderrprinter.py @@ -7,7 +7,9 @@ p.close() # this should be a no-op p.flush() # this should be a no-op assert p.fileno() == 2 - assert p.isatty() + #It doesn't make sense to assert this. Stderror could be a tty (the terminal) + #or not, depending on how we are running the tests. + #assert p.isatty() assert p.write('foo') == 3 raises(TypeError, p.write, b'foo') diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -390,7 +390,7 @@ raises(TypeError, pickle.dumps, f, protocol) def test_mod(self): - import _io + import _io, _frozen_importlib typemods = dict((t, t.__module__) for t in vars(_io).values() if isinstance(t, type)) for t, mod in typemods.items(): @@ -398,6 +398,11 @@ assert mod == 'builtins' elif t is _io.UnsupportedOperation: assert mod == 'io' + #TODO: Make sure this is a reasonable thing to do. Check if there is + #a cleaner way to do these checks or if these checks even make sense + #in general. They seem really brittle. + elif t is _frozen_importlib.BuiltinImporter: + assert mod == "_frozen_importlib" else: assert mod == '_io' From pypy.commits at gmail.com Tue Jan 12 22:23:45 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:45 -0800 (PST) Subject: [pypy-commit] pypy py3.3-marky1991-fix-ssl: Fixed ssl. Message-ID: <5695c341.08e11c0a.a7642.2520@mx.google.com> Author: marky1991 Branch: py3.3-marky1991-fix-ssl Changeset: r81696:0f1486faae35 Date: 2015-12-15 16:56 -0500 http://bitbucket.org/pypy/pypy/changeset/0f1486faae35/ Log: Fixed ssl. diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1094,27 +1094,13 @@ return space.str(space.getattr(w_exc, space.wrap("args"))) -class W_Error(interp_exceptions.W_OSError): - "An error occurred in the SSL implementation." - - def descr_str(self, space): - if space.isinstance_w(self.w_strerror, space.w_unicode): - return self.w_strerror - else: - return space.str(space.newtuple(self.args_w)) - -W_Error.typedef = TypeDef( - "ssl.SSLError", - interp_exceptions.W_OSError.typedef, - __new__ = interp_exceptions._new(W_Error), - __doc__ = W_Error.__doc__, - __str__ = interp2app(W_Error.descr_str), - ) - - class ErrorCache: def __init__(self, space): - self.w_error = space.gettypefor(W_Error) + w_socketerror = interp_socket.get_error(space, "error") + self.w_error = space.new_exception_class( + "_ssl.SSLError", w_socketerror) + space.setattr(self.w_error, space.wrap('__str__'), + space.wrap(interp2app(SSLError_descr_str))) self.w_ZeroReturnError = space.new_exception_class( "ssl.SSLZeroReturnError", self.w_error) self.w_WantReadError = space.new_exception_class( From pypy.commits at gmail.com Tue Jan 12 22:23:46 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:46 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Merge ssl fixes back into py3.3 Message-ID: <5695c342.6217c20a.2560b.599a@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81697:49c6f4b27909 Date: 2015-12-15 17:12 -0500 http://bitbucket.org/pypy/pypy/changeset/49c6f4b27909/ Log: Merge ssl fixes back into py3.3 diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1094,27 +1094,13 @@ return space.str(space.getattr(w_exc, space.wrap("args"))) -class W_Error(interp_exceptions.W_OSError): - "An error occurred in the SSL implementation." - - def descr_str(self, space): - if space.isinstance_w(self.w_strerror, space.w_unicode): - return self.w_strerror - else: - return space.str(space.newtuple(self.args_w)) - -W_Error.typedef = TypeDef( - "ssl.SSLError", - interp_exceptions.W_OSError.typedef, - __new__ = interp_exceptions._new(W_Error), - __doc__ = W_Error.__doc__, - __str__ = interp2app(W_Error.descr_str), - ) - - class ErrorCache: def __init__(self, space): - self.w_error = space.gettypefor(W_Error) + w_socketerror = interp_socket.get_error(space, "error") + self.w_error = space.new_exception_class( + "_ssl.SSLError", w_socketerror) + space.setattr(self.w_error, space.wrap('__str__'), + space.wrap(interp2app(SSLError_descr_str))) self.w_ZeroReturnError = space.new_exception_class( "ssl.SSLZeroReturnError", self.w_error) self.w_WantReadError = space.new_exception_class( From pypy.commits at gmail.com Tue Jan 12 22:23:48 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:48 -0800 (PST) Subject: [pypy-commit] pypy fix-3.3-pypy-magic: When doing init_builtin, force a module.init() call to make sure we don't just grab the module out of sys.modules. (Fixes reload()) Message-ID: <5695c344.8205c20a.c5c42.27e6@mx.google.com> Author: marky1991 Branch: fix-3.3-pypy-magic Changeset: r81698:afb2b3cd9535 Date: 2015-12-17 01:22 -0500 http://bitbucket.org/pypy/pypy/changeset/afb2b3cd9535/ Log: When doing init_builtin, force a module.init() call to make sure we don't just grab the module out of sys.modules. (Fixes reload()) diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -84,7 +84,9 @@ name = space.str0_w(w_name) if name not in space.builtin_modules: return - return space.getbuiltinmodule(name) + #This is needed to make reload actually reload instead of just using the + #already-present module in sys.modules. + return space.getbuiltinmodule(name, force_init=True) def init_frozen(space, w_name): return None From pypy.commits at gmail.com Tue Jan 12 22:23:50 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:50 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Merging magic fixes into py3.3 . Message-ID: <5695c346.05bd1c0a.be433.263c@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81699:5e2210cd44a3 Date: 2015-12-17 01:24 -0500 http://bitbucket.org/pypy/pypy/changeset/5e2210cd44a3/ Log: Merging magic fixes into py3.3 . diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -84,7 +84,9 @@ name = space.str0_w(w_name) if name not in space.builtin_modules: return - return space.getbuiltinmodule(name) + #This is needed to make reload actually reload instead of just using the + #already-present module in sys.modules. + return space.getbuiltinmodule(name, force_init=True) def init_frozen(space, w_name): return None From pypy.commits at gmail.com Tue Jan 12 22:23:52 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:52 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix sys module testing. Message-ID: <5695c348.6adec20a.ad5ea.0f51@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81700:878013c28c6f Date: 2015-12-20 19:56 -0500 http://bitbucket.org/pypy/pypy/changeset/878013c28c6f/ Log: Fix sys module testing. diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -32,7 +32,8 @@ w_sys.flush_std_files(space) msg = space.bytes_w(space.call_function(w_read)) - assert 'Exception IOError' in msg + #IOError has become an alias for OSError + assert 'Exception OSError' in msg finally: space.setattr(w_sys, space.wrap('stdout'), w_sys.get('__stdout__')) space.setattr(w_sys, space.wrap('stderr'), w_sys.get('__stderr__')) From pypy.commits at gmail.com Tue Jan 12 22:23:54 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:54 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix lib_pypy/test_code_module. Still need to make sure that the tests here are tested elsewhere. Message-ID: <5695c34a.4c301c0a.c2383.248c@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81701:ecf6e6852002 Date: 2015-12-22 21:51 -0500 http://bitbucket.org/pypy/pypy/changeset/ecf6e6852002/ Log: Fix lib_pypy/test_code_module. Still need to make sure that the tests here are tested elsewhere. diff --git a/pypy/module/test_lib_pypy/test_code_module.py b/pypy/module/test_lib_pypy/test_code_module.py --- a/pypy/module/test_lib_pypy/test_code_module.py +++ b/pypy/module/test_lib_pypy/test_code_module.py @@ -21,16 +21,22 @@ def test_cause_tb(self): interp = self.get_interp() - interp.runsource('raise IOError from OSError') + #(Arbitrarily) Changing to TypeError as IOError is now an alias of OSError, making + #testing confusing + interp.runsource('raise TypeError from OSError') result = interp.out.getvalue() + #For reasons to me unknown, the code module does not show chained exceptions + #It only shows the last exception. Keeping this for now. + #The test needs to me moved elsewhere if chained exceptions aren't + #already tested elsewhere expected_header = """OSError The above exception was the direct cause of the following exception: Traceback (most recent call last): """ - assert expected_header in result - assert result.endswith("IOError\n") + #assert expected_header in result + assert result.endswith("TypeError\n") def test_context_tb(self): interp = self.get_interp() @@ -45,5 +51,5 @@ Traceback (most recent call last): """ - assert expected_header in result + #assert expected_header in result assert result.endswith("NameError: name '_diana_' is not defined\n") From pypy.commits at gmail.com Tue Jan 12 22:23:55 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:55 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Make sure to grab the import lock when importing. Not sure if this a sufficent fix, but it at least adds this back. Message-ID: <5695c34b.2457c20a.d9372.ffffec8b@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81702:b201f36ae568 Date: 2015-12-29 23:41 -0500 http://bitbucket.org/pypy/pypy/changeset/b201f36ae568/ Log: Make sure to grab the import lock when importing. Not sure if this a sufficent fix, but it at least adds this back. diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -75,19 +75,25 @@ w_mod = check_sys_modules_w(space, modulename) if w_mod: return w_mod - if modulename in space.builtin_modules: - return space.getbuiltinmodule(modulename) + try: + lock = getimportlock(space) + lock.acquire_lock() - ec = space.getexecutioncontext() - with open(os.path.join(lib_pypy, modulename + '.py')) as fp: - source = fp.read() - pathname = "" % modulename - code_w = ec.compiler.compile(source, pathname, 'exec', 0) - w_mod = add_module(space, space.wrap(modulename)) - space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod) - space.setitem(w_mod.w_dict, space.wrap('__name__'), w_mod.w_name) - code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict) - assert check_sys_modules_w(space, modulename) + if modulename in space.builtin_modules: + return space.getbuiltinmodule(modulename) + + ec = space.getexecutioncontext() + with open(os.path.join(lib_pypy, modulename + '.py')) as fp: + source = fp.read() + pathname = "" % modulename + code_w = ec.compiler.compile(source, pathname, 'exec', 0) + w_mod = add_module(space, space.wrap(modulename)) + space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod) + space.setitem(w_mod.w_dict, space.wrap('__name__'), w_mod.w_name) + code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict) + assert check_sys_modules_w(space, modulename) + finally: + lock.release_lock(silent_after_fork=True) return w_mod @@ -174,6 +180,7 @@ def acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock + print("calling original") if self.lock is None: try: self.lock = self.space.allocate_lock() @@ -224,7 +231,9 @@ self.lockcounter = 0 def getimportlock(space): - return space.fromcache(ImportRLock) + me = space.fromcache(ImportRLock) + print(id(me), "id of lock") + return me # __________________________________________________________________ # diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -92,18 +92,31 @@ importlock = getimportlock(space) original_acquire = importlock.acquire_lock def acquire_lock(): + print("calling monkeyed") importlock.count += 1 original_acquire() importlock.count = 0 + print(id(acquire_lock), "acq id") + monkeypatch.setattr(importlock.__class__, 'acquire_lock', acquire_lock) + print(id(getattr(importlock.__class__, + "acquire_lock")), + "getattr id") monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) + print(id(getattr(importlock.__class__, + "acquire_lock")), + "getattr id") + print(id(importlock.__class__.acquire_lock), id(importlock.acquire_lock), id(acquire_lock), "alll") + # An already imported module importhook(space, 'sys') assert importlock.count == 0 # A new module - importhook(space, '__future__') + x = "time" + importhook(space, x) assert importlock.count == 1 + print("yay") # Import it again previous_count = importlock.count - importhook(space, '__future__') + importhook(space, x) assert importlock.count == previous_count From pypy.commits at gmail.com Tue Jan 12 22:23:57 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:57 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Update cffi backend test c file. Message-ID: <5695c34d.482e1c0a.3d702.26ff@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81703:617867e5ac22 Date: 2015-12-29 23:48 -0500 http://bitbucket.org/pypy/pypy/changeset/617867e5ac22/ Log: Update cffi backend test c file. diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,6 +1,9 @@ # ____________________________________________________________ import sys +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" + " of cffi that differs from the one that we" + " get from 'import _cffi_backend'") if sys.version_info < (3,): type_or_class = "type" mandatory_b_prefix = '' @@ -2313,9 +2316,6 @@ f(); f() assert get_errno() == 77 -def test_abi(): - assert isinstance(FFI_DEFAULT_ABI, int) - def test_cast_to_array(): # not valid in C! extension to get a non-owning BInt = new_primitive_type("int") @@ -3393,6 +3393,78 @@ check(4 | 8, "CHB", "GTB") check(4 | 16, "CHB", "ROB") +def test_memmove(): + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + Char = new_primitive_type("char") + CharA = new_array_type(new_pointer_type(Char), None) + p = newp(ShortA, [-1234, -2345, -3456, -4567, -5678]) + memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + memmove(p + 4, newp(CharA, b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + +def test_memmove_buffer(): + import array + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + a = array.array('H', [10000, 20000, 30000]) + p = newp(ShortA, 5) + memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + +def test_memmove_readonly_readwrite(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, BufferError), memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + memmove(ba, b"EFGH", 4) + assert ba == bytearray(b"EFGHx") + +def test_memmove_sign_check(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + py.test.raises(ValueError, memmove, p, p + 1, -1) # not segfault + +def test_memmove_bad_cdata(): + BInt = new_primitive_type("int") + p = cast(BInt, 42) + py.test.raises(TypeError, memmove, p, bytearray(b'a'), 1) + py.test.raises(TypeError, memmove, bytearray(b'a'), p, 1) + def test_dereference_null_ptr(): BInt = new_primitive_type("int") BIntPtr = new_pointer_type(BInt) @@ -3425,6 +3497,20 @@ "that you are not e.g. mixing up different ffi " "instances)") -def test_version(): - # this test is here mostly for PyPy - assert __version__ == "1.2.1" +def test_stdcall_function_type(): + assert FFI_CDECL == FFI_DEFAULT_ABI + try: + stdcall = FFI_STDCALL + except NameError: + stdcall = FFI_DEFAULT_ABI + BInt = new_primitive_type("int") + BFunc = new_function_type((BInt, BInt), BInt, False, stdcall) + if stdcall != FFI_DEFAULT_ABI: + assert repr(BFunc) == "" + else: + assert repr(BFunc) == "" + +def test_get_common_types(): + d = {} + _get_common_types(d) + assert d['bool'] == '_Bool' From pypy.commits at gmail.com Tue Jan 12 22:23:59 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:23:59 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix many many pickle-related failures. The changes to frozen_importlib are questionable and might be wrong. (Once you replace __import__ with import_with_frames_removed, when we try to pickle __import__ it fails to find a function with name __import__, causing pickling to fail.) Because unbound methods bacame regular functions in python3, pickling them triggers save_global, which fails because they're obviously not globals. Thus, I changed pickle.py to just use the function's __reduce/reduce_ex__ methods as they do in the default branch. Message-ID: <5695c34f.8f7e1c0a.cf222.25d0@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81704:1ea59f14a46a Date: 2015-12-30 17:45 -0500 http://bitbucket.org/pypy/pypy/changeset/1ea59f14a46a/ Log: Fix many many pickle-related failures. The changes to frozen_importlib are questionable and might be wrong. (Once you replace __import__ with import_with_frames_removed, when we try to pickle __import__ it fails to find a function with name __import__, causing pickling to fail.) Because unbound methods bacame regular functions in python3, pickling them triggers save_global, which fails because they're obviously not globals. Thus, I changed pickle.py to just use the function's __reduce/reduce_ex__ methods as they do in the default branch. diff --git a/lib-python/3/pickle.py b/lib-python/3/pickle.py --- a/lib-python/3/pickle.py +++ b/lib-python/3/pickle.py @@ -292,10 +292,15 @@ # Check the type dispatch table t = type(obj) - f = self.dispatch.get(t) - if f: - f(self, obj) # Call unbound method with explicit self - return + #Unbound methods no longer exist, but pyframes rely on being + #able to pickle unbound methods + #This is a pypy-specific requirement, thus the change in the stdlib + is_unbound_method = t == FunctionType and "." in obj.__qualname__ + if not is_unbound_method: + f = self.dispatch.get(t) + if f: + f(self, obj) # Call unbound method with explicit self + return # Check private dispatch table if any, or else copyreg.dispatch_table reduce = getattr(self, 'dispatch_table', dispatch_table).get(t) diff --git a/pypy/module/_frozen_importlib/__init__.py b/pypy/module/_frozen_importlib/__init__.py --- a/pypy/module/_frozen_importlib/__init__.py +++ b/pypy/module/_frozen_importlib/__init__.py @@ -30,7 +30,7 @@ space.wrap(space.builtin)) code_w.exec_code(space, self.w_dict, self.w_dict) - self.w_import = space.wrap(interp_import.import_with_frames_removed) + self.w_import = space.wrap(interp_import.__import__) def startup(self, space): """Copy our __import__ to builtins.""" diff --git a/pypy/module/_frozen_importlib/interp_import.py b/pypy/module/_frozen_importlib/interp_import.py --- a/pypy/module/_frozen_importlib/interp_import.py +++ b/pypy/module/_frozen_importlib/interp_import.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError @interp2app -def import_with_frames_removed(space, __args__): +def __import__(space, __args__): try: return space.call_args( space.getbuiltinmodule('_frozen_importlib').getdictvalue( From pypy.commits at gmail.com Tue Jan 12 22:24:01 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:01 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix pickling stuff. Also, when (un)pickling functions, pass qualname correctly. Message-ID: <5695c351.8673c20a.386b4.1551@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81705:cdacad9a627a Date: 2015-12-31 21:06 -0500 http://bitbucket.org/pypy/pypy/changeset/cdacad9a627a/ Log: Fix pickling stuff. Also, when (un)pickling functions, pass qualname correctly. diff --git a/lib-python/3/pickle.py b/lib-python/3/pickle.py --- a/lib-python/3/pickle.py +++ b/lib-python/3/pickle.py @@ -23,7 +23,7 @@ """ -from types import FunctionType, BuiltinFunctionType +from types import FunctionType, BuiltinFunctionType, ModuleType from copyreg import dispatch_table from copyreg import _extension_registry, _inverted_registry, _extension_cache import marshal @@ -295,12 +295,10 @@ #Unbound methods no longer exist, but pyframes rely on being #able to pickle unbound methods #This is a pypy-specific requirement, thus the change in the stdlib - is_unbound_method = t == FunctionType and "." in obj.__qualname__ - if not is_unbound_method: - f = self.dispatch.get(t) - if f: - f(self, obj) # Call unbound method with explicit self - return + f = self.dispatch.get(t) + if f: + f(self, obj) # Call unbound method with explicit self + return # Check private dispatch table if any, or else copyreg.dispatch_table reduce = getattr(self, 'dispatch_table', dispatch_table).get(t) @@ -627,6 +625,9 @@ # else tmp is empty, and we're done def save_dict(self, obj): + modict_saver = self._pickle_maybe_moduledict(obj) + if modict_saver is not None: + return self.save_reduce(*modict_saver) write = self.write if self.bin: @@ -677,6 +678,102 @@ write(SETITEM) # else tmp is empty, and we're done + def _pickle_maybe_moduledict(self, obj): + # save module dictionary as "getattr(module, '__dict__')" + try: + name = obj['__name__'] + if type(name) is not str: + return None + themodule = sys.modules[name] + if type(themodule) is not ModuleType: + return None + if themodule.__dict__ is not obj: + return None + except (AttributeError, KeyError, TypeError): + return None + return getattr, (themodule, '__dict__') + + def save_function(self, obj): + try: + return self.save_global(obj) + except PicklingError: + pass + # Check copy_reg.dispatch_table + reduce = dispatch_table.get(type(obj)) + if reduce: + rv = reduce(obj) + else: + # Check for a __reduce_ex__ method, fall back to __reduce__ + reduce = getattr(obj, "__reduce_ex__", None) + if reduce: + rv = reduce(self.proto) + else: + reduce = getattr(obj, "__reduce__", None) + if reduce: + rv = reduce() + else: + raise e + return self.save_reduce(obj=obj, *rv) + dispatch[FunctionType] = save_function + + def save_global(self, obj, name=None, pack=struct.pack): + write = self.write + memo = self.memo + + #This logic is stolen from the protocol 4 logic from 3.5 + #We need it unconditionally as pypy itself relies on it. + if name is None: + name = getattr(obj, '__qualname__', None) + if name is None: + name = obj.__name__ + + module_name = whichmodule(obj, name, allow_qualname=True) + try: + __import__(module_name, level=0) + module = sys.modules[module_name] + obj2 = _getattribute(module, name, allow_qualname=True) + except (ImportError, KeyError, AttributeError): + raise PicklingError( + "Can't pickle %r: it's not found as %s.%s" % + (obj, module_name, name)) + else: + if obj2 is not obj: + raise PicklingError( + "Can't pickle %r: it's not the same object as %s.%s" % + (obj, module_name, name)) + + if self.proto >= 2: + code = _extension_registry.get((module_name, name)) + if code: + assert code > 0 + if code <= 0xff: + write(EXT1 + bytes([code])) + elif code <= 0xffff: + write(EXT2 + bytes([code&0xff, code>>8])) + else: + write(EXT4 + pack("= 3. + if self.proto >= 3: + write(GLOBAL + bytes(module_name, "utf-8") + b'\n' + + bytes(name, "utf-8") + b'\n') + else: + if self.fix_imports: + r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING + r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING + if (module_name, name) in r_name_mapping: + module_name, name = r_name_mapping[(module_name, name)] + if module_name in r_import_mapping: + module_name = r_import_mapping[module_name] + try: + write(GLOBAL + bytes(module_name, "ascii") + b'\n' + + bytes(name, "ascii") + b'\n') + except UnicodeEncodeError: + raise PicklingError( + "can't pickle global identifier '%s.%s' using " + "pickle protocol %i" % (module, name, self.proto)) + + self.memoize(obj) def save_global(self, obj, name=None, pack=struct.pack): write = self.write memo = self.memo @@ -742,7 +839,6 @@ return self.save_reduce(type, (...,), obj=obj) return self.save_global(obj) - dispatch[FunctionType] = save_global dispatch[BuiltinFunctionType] = save_global dispatch[type] = save_type @@ -764,13 +860,30 @@ # aha, this is the first one :-) memo[id(memo)]=[x] +def _getattribute(obj, name, allow_qualname=False): + dotted_path = name.split(".") + if not allow_qualname and len(dotted_path) > 1: + raise AttributeError("Can't get qualified attribute {!r} on {!r}; " + + "use protocols >= 4 to enable support" + .format(name, obj)) + for subpath in dotted_path: + if subpath == '': + raise AttributeError("Can't get local attribute {!r} on {!r}" + .format(name, obj)) + try: + obj = getattr(obj, subpath) + except AttributeError: + raise AttributeError("Can't get attribute {!r} on {!r}" + .format(name, obj)) + return obj + # A cache for whichmodule(), mapping a function object to the name of # the module in which the function was found. classmap = {} # called classmap for backwards compatibility -def whichmodule(func, funcname): +def whichmodule(obj, name, allow_qualname=False): """Figure out the module in which a function occurs. Search sys.modules for the module. @@ -779,22 +892,23 @@ If the function cannot be found, return "__main__". """ # Python functions should always get an __module__ from their globals. - mod = getattr(func, "__module__", None) + mod = getattr(obj, "__module__", None) if mod is not None: return mod - if func in classmap: - return classmap[func] + if obj in classmap: + return classmap[obj] - for name, module in list(sys.modules.items()): - if module is None: + for module_name, module in list(sys.modules.items()): + if module_name == '__main__' or module is None: continue # skip dummy package entries - if name != '__main__' and getattr(module, funcname, None) is func: - break - else: - name = '__main__' - classmap[func] = name - return name - + try: + if _getattribute(module, name, allow_qualname) is obj: + classmap[obj] = module_name + return module_name + except AttributeError: + pass + classmap[obj] = '__main__' + return '__main__' # Unpickling machinery diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -306,6 +306,7 @@ tup_base = [] tup_state = [ w(self.name), + w(self.qualname), w_doc, w(self.code), w_func_globals, @@ -319,8 +320,8 @@ def descr_function__setstate__(self, space, w_args): args_w = space.unpackiterable(w_args) try: - (w_name, w_doc, w_code, w_func_globals, w_closure, w_defs, - w_func_dict, w_module) = args_w + (w_name, w_qualname, w_doc, w_code, w_func_globals, w_closure, + w_defs, w_func_dict, w_module) = args_w except ValueError: # wrong args raise OperationError(space.w_ValueError, @@ -328,6 +329,7 @@ self.space = space self.name = space.str_w(w_name) + self.qualname = space.str_w(w_qualname) self.code = space.interp_w(Code, w_code) if not space.is_w(w_closure, space.w_None): from pypy.interpreter.nestedscope import Cell diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -394,8 +394,10 @@ import pickle tdict = {'2':2, '3':3, '5':5} diter = iter(tdict) - next(diter) - raises(TypeError, pickle.dumps, diter) + seen = next(diter) + pckl = pickle.dumps(diter) + result = pickle.loads(pckl) + assert set(result) == (set('235') - set(seen)) def test_pickle_reversed(self): import pickle From pypy.commits at gmail.com Tue Jan 12 22:24:04 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:04 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Get rid of debugging junk. Message-ID: <5695c354.c4b61c0a.55951.fffff814@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81707:114355f9d9f5 Date: 2016-01-04 12:09 -0500 http://bitbucket.org/pypy/pypy/changeset/114355f9d9f5/ Log: Get rid of debugging junk. diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -180,7 +180,6 @@ def acquire_lock(self): # this function runs with the GIL acquired so there is no race # condition in the creation of the lock - print("calling original") if self.lock is None: try: self.lock = self.space.allocate_lock() @@ -231,9 +230,7 @@ self.lockcounter = 0 def getimportlock(space): - me = space.fromcache(ImportRLock) - print(id(me), "id of lock") - return me + return space.fromcache(ImportRLock) # __________________________________________________________________ # From pypy.commits at gmail.com Tue Jan 12 22:24:06 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:06 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix translation error caused by the qualname fix. Message-ID: <5695c356.85e41c0a.1595e.ffffeec1@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81708:57c626958f94 Date: 2016-01-05 02:23 -0500 http://bitbucket.org/pypy/pypy/changeset/57c626958f94/ Log: Fix translation error caused by the qualname fix. diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -329,7 +329,7 @@ self.space = space self.name = space.str_w(w_name) - self.qualname = space.str_w(w_qualname) + self.qualname = space.str_w(w_qualname).decode("utf-8") self.code = space.interp_w(Code, w_code) if not space.is_w(w_closure, space.w_None): from pypy.interpreter.nestedscope import Cell From pypy.commits at gmail.com Tue Jan 12 22:24:07 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:07 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Remove this test as per 4d306d6. Message-ID: <5695c357.8e371c0a.bcd8f.23b6@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81709:63d18f9eccd3 Date: 2016-01-07 00:33 -0500 http://bitbucket.org/pypy/pypy/changeset/63d18f9eccd3/ Log: Remove this test as per 4d306d6. diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -808,31 +808,6 @@ data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data - def test_non_interactive_stdout_fully_buffered(self): - if os.name == 'nt': - try: - import __pypy__ - except: - py.test.skip('app_main cannot run on non-pypy for windows') - path = getscript(r""" - import sys, time - sys.stdout.write('\x00(STDOUT)\n\x00') # stays in buffers - time.sleep(1) - sys.stderr.write('\x00[STDERR]\n\x00') - time.sleep(1) - # stdout flushed automatically here - """) - cmdline = '%s -u "%s" %s' % (python3, app_main, path) - print 'POPEN:', cmdline - child_in, child_out_err = os.popen4(cmdline) - data = child_out_err.read(11) - # Py3 is always at least line buffered - assert data == '\x00(STDOUT)\n\x00' # from stdout - child_in.close() - data = child_out_err.read(11) - assert data == '\x00[STDERR]\n\x00' # from stderr - child_out_err.close() - def test_non_interactive_stdout_unbuffered(self, monkeypatch): monkeypatch.setenv('PYTHONUNBUFFERED', '1') if os.name == 'nt': From pypy.commits at gmail.com Tue Jan 12 22:24:02 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:02 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Turn qualname into a unicode object in the ast phase. (Fixes repr/str of class objects with non-ascii names) Message-ID: <5695c352.88d31c0a.103a2.2840@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81706:929b79827d35 Date: 2016-01-02 03:32 -0500 http://bitbucket.org/pypy/pypy/changeset/929b79827d35/ Log: Turn qualname into a unicode object in the ast phase. (Fixes repr/str of class objects with non-ascii names) diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1381,7 +1381,7 @@ # ... and store it as __module__ self.name_op("__module__", ast.Store) # store the qualname - w_qualname = self.space.wrap(self.qualname) + w_qualname = self.space.wrap(self.qualname.decode("utf-8")) self.load_const(w_qualname) self.name_op("__qualname__", ast.Store) # compile the body proper From pypy.commits at gmail.com Tue Jan 12 22:24:09 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:09 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Importlib raises a TypeError when you try to set __package__ to a string nowadays. Updating the test. Message-ID: <5695c359.2a06c20a.83684.ffffa9bc@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81710:ac2adddb9bb1 Date: 2016-01-07 23:04 -0500 http://bitbucket.org/pypy/pypy/changeset/ac2adddb9bb1/ Log: Importlib raises a TypeError when you try to set __package__ to a string nowadays. Updating the test. diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -499,7 +499,7 @@ # Check relative fails when __package__ set to a non-string ns = dict(__package__=object()) check_absolute() - raises(ValueError, check_relative) + raises(TypeError, check_relative) def test_import_function(self): # More tests for __import__ From pypy.commits at gmail.com Tue Jan 12 22:24:11 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:11 -0800 (PST) Subject: [pypy-commit] pypy py3.3: As of 3.3, this scenario actually raises a KeyError on Cpython, not an ImportError anymore. Message-ID: <5695c35b.cb571c0a.75fb7.ffffee25@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81711:fc8ca82ec8d6 Date: 2016-01-07 23:52 -0500 http://bitbucket.org/pypy/pypy/changeset/fc8ca82ec8d6/ Log: As of 3.3, this scenario actually raises a KeyError on Cpython, not an ImportError anymore. diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -636,7 +636,9 @@ def test_del_from_sys_modules(self): try: import del_sys_module - except ImportError: + #This raises a KeyError in cpython, + #not an import error + except KeyError: pass # ok else: assert False, 'should not work' From pypy.commits at gmail.com Tue Jan 12 22:24:12 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:12 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Get rid of debugging crap from the tests for import locks. Message-ID: <5695c35c.6918c20a.deb73.609b@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81712:1b716d173076 Date: 2016-01-09 16:01 -0500 http://bitbucket.org/pypy/pypy/changeset/1b716d173076/ Log: Get rid of debugging crap from the tests for import locks. diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -92,31 +92,18 @@ importlock = getimportlock(space) original_acquire = importlock.acquire_lock def acquire_lock(): - print("calling monkeyed") importlock.count += 1 original_acquire() importlock.count = 0 - print(id(acquire_lock), "acq id") monkeypatch.setattr(importlock.__class__, 'acquire_lock', acquire_lock) - print(id(getattr(importlock.__class__, - "acquire_lock")), - "getattr id") monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) - - print(id(getattr(importlock.__class__, - "acquire_lock")), - "getattr id") - print(id(importlock.__class__.acquire_lock), id(importlock.acquire_lock), id(acquire_lock), "alll") - # An already imported module importhook(space, 'sys') assert importlock.count == 0 # A new module - x = "time" - importhook(space, x) + importhook(space, "time") assert importlock.count == 1 - print("yay") # Import it again previous_count = importlock.count - importhook(space, x) + importhook(space, "time") assert importlock.count == previous_count From pypy.commits at gmail.com Tue Jan 12 22:24:14 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:14 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Remove final unnecessary change to test_import_lock Message-ID: <5695c35e.ccaa1c0a.51e03.5965@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81713:03071883cceb Date: 2016-01-09 16:07 -0500 http://bitbucket.org/pypy/pypy/changeset/03071883cceb/ Log: Remove final unnecessary change to test_import_lock diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -95,9 +95,9 @@ importlock.count += 1 original_acquire() importlock.count = 0 - monkeypatch.setattr(importlock.__class__, 'acquire_lock', acquire_lock) monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) # An already imported module + importhook(space, 'sys') assert importlock.count == 0 # A new module From pypy.commits at gmail.com Tue Jan 12 22:24:16 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:16 -0800 (PST) Subject: [pypy-commit] pypy py3.3: All the pickle-related tests still seem to be passing after making less drastic changes to pickle.py . Message-ID: <5695c360.8f7e1c0a.cf222.25e0@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81714:b2473fd210f4 Date: 2016-01-10 14:17 -0500 http://bitbucket.org/pypy/pypy/changeset/b2473fd210f4/ Log: All the pickle-related tests still seem to be passing after making less drastic changes to pickle.py . diff --git a/lib-python/3/pickle.py b/lib-python/3/pickle.py --- a/lib-python/3/pickle.py +++ b/lib-python/3/pickle.py @@ -292,9 +292,6 @@ # Check the type dispatch table t = type(obj) - #Unbound methods no longer exist, but pyframes rely on being - #able to pickle unbound methods - #This is a pypy-specific requirement, thus the change in the stdlib f = self.dispatch.get(t) if f: f(self, obj) # Call unbound method with explicit self @@ -720,64 +717,6 @@ write = self.write memo = self.memo - #This logic is stolen from the protocol 4 logic from 3.5 - #We need it unconditionally as pypy itself relies on it. - if name is None: - name = getattr(obj, '__qualname__', None) - if name is None: - name = obj.__name__ - - module_name = whichmodule(obj, name, allow_qualname=True) - try: - __import__(module_name, level=0) - module = sys.modules[module_name] - obj2 = _getattribute(module, name, allow_qualname=True) - except (ImportError, KeyError, AttributeError): - raise PicklingError( - "Can't pickle %r: it's not found as %s.%s" % - (obj, module_name, name)) - else: - if obj2 is not obj: - raise PicklingError( - "Can't pickle %r: it's not the same object as %s.%s" % - (obj, module_name, name)) - - if self.proto >= 2: - code = _extension_registry.get((module_name, name)) - if code: - assert code > 0 - if code <= 0xff: - write(EXT1 + bytes([code])) - elif code <= 0xffff: - write(EXT2 + bytes([code&0xff, code>>8])) - else: - write(EXT4 + pack("= 3. - if self.proto >= 3: - write(GLOBAL + bytes(module_name, "utf-8") + b'\n' + - bytes(name, "utf-8") + b'\n') - else: - if self.fix_imports: - r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING - r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING - if (module_name, name) in r_name_mapping: - module_name, name = r_name_mapping[(module_name, name)] - if module_name in r_import_mapping: - module_name = r_import_mapping[module_name] - try: - write(GLOBAL + bytes(module_name, "ascii") + b'\n' + - bytes(name, "ascii") + b'\n') - except UnicodeEncodeError: - raise PicklingError( - "can't pickle global identifier '%s.%s' using " - "pickle protocol %i" % (module, name, self.proto)) - - self.memoize(obj) - def save_global(self, obj, name=None, pack=struct.pack): - write = self.write - memo = self.memo - if name is None: name = obj.__name__ @@ -839,6 +778,7 @@ return self.save_reduce(type, (...,), obj=obj) return self.save_global(obj) + dispatch[FunctionType] = save_function dispatch[BuiltinFunctionType] = save_global dispatch[type] = save_type @@ -860,30 +800,13 @@ # aha, this is the first one :-) memo[id(memo)]=[x] -def _getattribute(obj, name, allow_qualname=False): - dotted_path = name.split(".") - if not allow_qualname and len(dotted_path) > 1: - raise AttributeError("Can't get qualified attribute {!r} on {!r}; " + - "use protocols >= 4 to enable support" - .format(name, obj)) - for subpath in dotted_path: - if subpath == '': - raise AttributeError("Can't get local attribute {!r} on {!r}" - .format(name, obj)) - try: - obj = getattr(obj, subpath) - except AttributeError: - raise AttributeError("Can't get attribute {!r} on {!r}" - .format(name, obj)) - return obj - # A cache for whichmodule(), mapping a function object to the name of # the module in which the function was found. classmap = {} # called classmap for backwards compatibility -def whichmodule(obj, name, allow_qualname=False): +def whichmodule(func, funcname): """Figure out the module in which a function occurs. Search sys.modules for the module. @@ -892,23 +815,22 @@ If the function cannot be found, return "__main__". """ # Python functions should always get an __module__ from their globals. - mod = getattr(obj, "__module__", None) + mod = getattr(func, "__module__", None) if mod is not None: return mod - if obj in classmap: - return classmap[obj] + if func in classmap: + return classmap[func] - for module_name, module in list(sys.modules.items()): - if module_name == '__main__' or module is None: + for name, module in list(sys.modules.items()): + if module is None: continue # skip dummy package entries - try: - if _getattribute(module, name, allow_qualname) is obj: - classmap[obj] = module_name - return module_name - except AttributeError: - pass - classmap[obj] = '__main__' - return '__main__' + if name != '__main__' and getattr(module, funcname, None) is func: + break + else: + name = '__main__' + classmap[func] = name + return name + # Unpickling machinery From pypy.commits at gmail.com Tue Jan 12 22:24:18 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:18 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Match commenting style to PEP-8 as requested. Also, went ahead and fixed the code module to mostly match py3k. Message-ID: <5695c362.e906c20a.ca6ee.ffff9c90@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81715:03591a1499c8 Date: 2016-01-10 15:07 -0500 http://bitbucket.org/pypy/pypy/changeset/03591a1499c8/ Log: Match commenting style to PEP-8 as requested. Also, went ahead and fixed the code module to mostly match py3k. diff --git a/lib-python/3/code.py b/lib-python/3/code.py --- a/lib-python/3/code.py +++ b/lib-python/3/code.py @@ -105,10 +105,9 @@ The output is written by self.write(), below. """ - type, value, tb = sys.exc_info() + type, value, sys.last_traceback = sys.exc_info() sys.last_type = type sys.last_value = value - sys.last_traceback = tb if filename and type is SyntaxError: # Work hard to stuff the correct filename in the exception try: @@ -126,7 +125,7 @@ else: # If someone has set sys.excepthook, we let that take precedence # over self.write - sys.excepthook(type, value, tb) + sys.excepthook(type, value, self.last_traceback) def showtraceback(self): """Display the exception that just occurred. @@ -136,25 +135,35 @@ The output is written by self.write(), below. """ + sys.last_type, sys.last_value, last_tb = ei = sys.exc_info() + sys.last_traceback = last_tb try: - type, value, tb = sys.exc_info() - sys.last_type = type - sys.last_value = value - sys.last_traceback = tb - tblist = traceback.extract_tb(tb) - del tblist[:1] - lines = traceback.format_list(tblist) - if lines: - lines.insert(0, "Traceback (most recent call last):\n") - lines.extend(traceback.format_exception_only(type, value)) + lines = [] + for value, tb in traceback._iter_chain(*ei[1:]): + if isinstance(value, str): + lines.append(value) + lines.append('\n') + continue + if tb: + tblist = traceback.extract_tb(tb) + if tb is last_tb: + # The last traceback includes the frame we + # exec'd in + del tblist[:1] + tblines = traceback.format_list(tblist) + if tblines: + lines.append("Traceback (most recent call last):\n") + lines.extend(tblines) + lines.extend(traceback.format_exception_only(type(value), + value)) finally: - tblist = tb = None + tblist = last_tb = ei = None if sys.excepthook is sys.__excepthook__: self.write(''.join(lines)) else: # If someone has set sys.excepthook, we let that take precedence # over self.write - sys.excepthook(type, value, tb) + sys.excepthook(sys.last_type, sys.last_value, last_tb) def write(self, data): """Write a string. diff --git a/pypy/module/__pypy__/test/test_stderrprinter.py b/pypy/module/__pypy__/test/test_stderrprinter.py --- a/pypy/module/__pypy__/test/test_stderrprinter.py +++ b/pypy/module/__pypy__/test/test_stderrprinter.py @@ -7,9 +7,9 @@ p.close() # this should be a no-op p.flush() # this should be a no-op assert p.fileno() == 2 - #It doesn't make sense to assert this. Stderror could be a tty (the terminal) - #or not, depending on how we are running the tests. - #assert p.isatty() + # It doesn't make sense to assert this. Stderror could be a tty + # (the terminal) or not, depending on how we are running the tests. + # assert p.isatty() assert p.write('foo') == 3 raises(TypeError, p.write, b'foo') diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -84,8 +84,8 @@ name = space.str0_w(w_name) if name not in space.builtin_modules: return - #This is needed to make reload actually reload instead of just using the - #already-present module in sys.modules. + # force_init is needed to make reload actually reload instead of just + # using the already-present module in sys.modules. return space.getbuiltinmodule(name, force_init=True) def init_frozen(space, w_name): diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -32,7 +32,7 @@ w_sys.flush_std_files(space) msg = space.bytes_w(space.call_function(w_read)) - #IOError has become an alias for OSError + # IOError has become an alias for OSError assert 'Exception OSError' in msg finally: space.setattr(w_sys, space.wrap('stdout'), w_sys.get('__stdout__')) diff --git a/pypy/module/test_lib_pypy/test_code_module.py b/pypy/module/test_lib_pypy/test_code_module.py --- a/pypy/module/test_lib_pypy/test_code_module.py +++ b/pypy/module/test_lib_pypy/test_code_module.py @@ -21,21 +21,17 @@ def test_cause_tb(self): interp = self.get_interp() - #(Arbitrarily) Changing to TypeError as IOError is now an alias of OSError, making - #testing confusing + # (Arbitrarily) Changing to TypeError as IOError is now an alias of + # OSError, making testing confusing interp.runsource('raise TypeError from OSError') result = interp.out.getvalue() - #For reasons to me unknown, the code module does not show chained exceptions - #It only shows the last exception. Keeping this for now. - #The test needs to me moved elsewhere if chained exceptions aren't - #already tested elsewhere expected_header = """OSError The above exception was the direct cause of the following exception: Traceback (most recent call last): """ - #assert expected_header in result + assert expected_header in result assert result.endswith("TypeError\n") def test_context_tb(self): @@ -51,5 +47,5 @@ Traceback (most recent call last): """ - #assert expected_header in result + assert expected_header in result assert result.endswith("NameError: name '_diana_' is not defined\n") From pypy.commits at gmail.com Tue Jan 12 22:24:23 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:23 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Merging master py3.3 into my fork. Message-ID: <5695c367.4c0c1c0a.25017.fffff265@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81716:7147579b4e95 Date: 2016-01-10 15:26 -0500 http://bitbucket.org/pypy/pypy/changeset/7147579b4e95/ Log: Merging master py3.3 into my fork. diff too long, truncating to 2000 out of 122807 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +__pycache__/ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -15,3 +15,5 @@ e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0 +f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 +850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -56,14 +56,15 @@ Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen + Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen Lukas Diekmann Sven Hager Anders Lehmann + Remi Meier Aurelien Campeas - Remi Meier Niklaus Haldimann Camillo Bruni Laura Creighton @@ -87,7 +88,6 @@ Ludovic Aubry Jacob Hallen Jason Creighton - Richard Plangger Alex Martelli Michal Bendowski stian @@ -168,7 +168,6 @@ Michael Twomey Lucian Branescu Mihaila Yichao Yu - Anton Gulenko Gabriel Lavoie Olivier Dormond Jared Grubb @@ -201,9 +200,12 @@ Alex Perry Vincent Legoll Alan McIntyre + Spenser Bauman Alexander Sedov Attila Gobi Christopher Pope + Devin Jeanpierre + Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg @@ -215,6 +217,7 @@ Carl Meyer Karl Ramm Pieter Zieschang + Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -234,6 +237,7 @@ Lutz Paelike Lucio Torre Lars Wassermann + Philipp Rustemeuer Henrik Vendelbo Dan Buch Miguel de Val Borro @@ -244,14 +248,17 @@ Martin Blais Lene Wagner Tomo Cocoa + Kim Jin Su Toni Mattis Lucas Stadler Julian Berman + Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez William Leslie Bobby Impollonia + Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara @@ -282,6 +289,7 @@ shoma hosaka Daniel Neuhäuser Ben Mather + Niclas Olofsson halgari Boglarka Vezer Chris Pressey @@ -308,13 +316,16 @@ Stefan Marr jiaaro Mads Kiilerich + Richard Lancaster opassembler.py Antony Lee + Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer Even Wiik Thomassen jbs squeaky + Zearin soareschen Kurt Griffiths Mike Bayer @@ -326,6 +337,7 @@ Anna Ravencroft Andrey Churin Dan Crosta + Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz @@ -352,8 +364,7 @@ Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files in the 'lib-python/2.7' directory are all copyrighted by the Python Software Foundation and licensed -under the Python Software License of which you can find a copy here: -http://www.python.org/doc/Copyright.html +under the terms that you can find here: https://docs.python.org/2/license.html License for 'pypy/module/unicodedata/' ====================================== @@ -430,12 +441,12 @@ _gdbm module, provided in the file lib_pypy/_gdbm.py, is redistributed under the terms of the GPL license as well. -License for 'pypy/module/_vmprof/src' +License for 'rpython/rlib/rvmprof/src' -------------------------------------- The code is based on gperftools. You may see a copy of the License for it at - https://code.google.com/p/gperftools/source/browse/COPYING + https://github.com/gperftools/gperftools/blob/master/COPYING License for 'liblzma and 'lzmaffi' ---------------------------------- diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/3/_abcoll.py b/lib-python/3/_abcoll.py new file mode 100644 --- /dev/null +++ b/lib-python/3/_abcoll.py @@ -0,0 +1,623 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +DON'T USE THIS MODULE DIRECTLY! The classes here should be imported +via collections; they are defined here only to alleviate certain +bootstrapping issues. Unit tests are in test_collections. +""" + +from abc import ABCMeta, abstractmethod +import sys + +__all__ = ["Hashable", "Iterable", "Iterator", + "Sized", "Container", "Callable", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + "ByteString", + ] + + +### collection related types which are not exposed through builtin ### +## iterators ## +bytes_iterator = type(iter(b'')) +bytearray_iterator = type(iter(bytearray())) +#callable_iterator = ??? +dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +dict_proxy = type(type.__dict__) + + +### ONE-TRICK PONIES ### + +class Hashable(metaclass=ABCMeta): + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + for B in C.__mro__: + if "__hash__" in B.__dict__: + if B.__dict__["__hash__"]: + return True + break + return NotImplemented + + +class Iterable(metaclass=ABCMeta): + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + if any("__iter__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Iterator(Iterable): + + @abstractmethod + def __next__(self): + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + if (any("__next__" in B.__dict__ for B in C.__mro__) and + any("__iter__" in B.__dict__ for B in C.__mro__)): + return True + return NotImplemented + +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) +#Iterator.register(callable_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) + +class Sized(metaclass=ABCMeta): + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + if any("__len__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Container(metaclass=ABCMeta): + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + if any("__contains__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class Callable(metaclass=ABCMeta): + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + if any("__call__" in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +### SETS ### + + +class Set(Sized, Iterable, Container): + + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return other.__lt__(self) + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + return other.__le__(self) + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + def __ne__(self, other): + return not (self == other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + def isdisjoint(self, other): + for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxsize + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + +Set.register(frozenset) + + +class MutableSet(Set): + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + +MutableSet.register(set) + + +### MAPPINGS ### + + +class Mapping(Sized, Iterable, Container): + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + return KeysView(self) + + def items(self): + return ItemsView(self) + + def values(self): + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + def __ne__(self, other): + return not (self == other) + + +class MappingView(Sized): + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + +class KeysView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + for key in self._mapping: + yield key + +KeysView.register(dict_keys) + + +class ItemsView(MappingView, Set): + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + +ItemsView.register(dict_items) + + +class ValuesView(MappingView): + + def __contains__(self, value): + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + +ValuesView.register(dict_values) + + +class MutableMapping(Mapping): + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + try: + key = next(iter(self)) + except StopIteration: + raise KeyError + value = self[key] + del self[key] + return key, value + + def clear(self): + try: + while True: + self.popitem() + except KeyError: + pass + + def update(*args, **kwds): + if len(args) > 2: + raise TypeError("update() takes at most 2 positional " + "arguments ({} given)".format(len(args))) + elif not args: + raise TypeError("update() takes at least 1 argument (0 given)") + self = args[0] + other = args[1] if len(args) >= 2 else () + + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + try: + return self[key] + except KeyError: + self[key] = default + return default + +MutableMapping.register(dict) + + +### SEQUENCES ### + + +class Sequence(Sized, Iterable, Container): + + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value): + for i, v in enumerate(self): + if v == value: + return i + raise ValueError + + def count(self, value): + return sum(1 for v in self if v == value) + +Sequence.register(tuple) +Sequence.register(str) +Sequence.register(range) + + +class ByteString(Sequence): + + """This unifies bytes and bytearray. + + XXX Should add all their methods. + """ + +ByteString.register(bytes) +ByteString.register(bytearray) + + +class MutableSequence(Sequence): + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + raise IndexError + + def append(self, value): + self.insert(len(self), value) + + def reverse(self): + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + for v in values: + self.append(v) + + def pop(self, index=-1): + v = self[index] + del self[index] + return v + + def remove(self, value): + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + +MutableSequence.register(list) +MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/lib-python/3/collections.py b/lib-python/3/collections.py new file mode 100644 --- /dev/null +++ b/lib-python/3/collections.py @@ -0,0 +1,1091 @@ +__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', + 'UserString', 'Counter', 'OrderedDict'] +# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py. +# They should however be considered an integral part of collections.py. +from _abcoll import * +import _abcoll +__all__ += _abcoll.__all__ + +from _collections import deque, defaultdict +from operator import itemgetter as _itemgetter +from keyword import iskeyword as _iskeyword +import sys as _sys +import heapq as _heapq +from weakref import proxy as _proxy +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap +from reprlib import recursive_repr as _recursive_repr + +################################################################################ +### OrderedDict +################################################################################ + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last==False). + + Raises KeyError if the element does not exist. + When last=True, acts like a fast version of self[key]=self.pop(key). + + ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + last.next = root.prev = link + else: + first = root.next + link.prev = root + link.next = first + root.next = first.prev = link + + def __sizeof__(self): + sizeof = _sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + update = __update = MutableMapping.update + keys = MutableMapping.keys + values = MutableMapping.values + items = MutableMapping.items + __ne__ = MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + @_recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and \ + all(p==q for p, q in zip(self.items(), other.items())) + return dict.__eq__(self, other) + + +################################################################################ +### namedtuple +################################################################################ + +_class_template = '''\ +from builtins import property as _property, tuple as _tuple +from operator import itemgetter as _itemgetter +from collections import OrderedDict + +class {typename}(tuple): + '{typename}({arg_list})' + + __slots__ = () + + _fields = {field_names!r} + + def __new__(_cls, {arg_list}): + 'Create new instance of {typename}({arg_list})' + return _tuple.__new__(_cls, ({arg_list})) + + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new {typename} object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != {num_fields:d}: + raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result)) + return result + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + '({repr_fmt})' % self + + def _asdict(self): + 'Return a new OrderedDict which maps field names to their values' + return OrderedDict(zip(self._fields, self)) + + __dict__ = property(_asdict) + + def _replace(_self, **kwds): + 'Return a new {typename} object replacing specified fields with new values' + result = _self._make(map(kwds.pop, {field_names!r}, _self)) + if kwds: + raise ValueError('Got unexpected field names: %r' % list(kwds)) + return result + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return tuple(self) + + def __getstate__(self): + 'Exclude the OrderedDict from pickling' + return None + +{field_defs} +''' + +_repr_template = '{name}=%r' + +_field_template = '''\ + {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}') +''' + +def namedtuple(typename, field_names, verbose=False, rename=False): + """Returns a new subclass of tuple with named fields. + + >>> Point = namedtuple('Point', ['x', 'y']) + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessable by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Parse and validate the field names. Validation serves two purposes, + # generating informative error messages and preventing template injection attacks. + if isinstance(field_names, str): + field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas + field_names = list(map(str, field_names)) + if rename: + seen = set() + for index, name in enumerate(field_names): + if (not all(c.isalnum() or c=='_' for c in name) + or _iskeyword(name) + or not name + or name[0].isdigit() + or name.startswith('_') + or name in seen): + field_names[index] = '_%d' % index + seen.add(name) + for name in [typename] + field_names: + if not all(c.isalnum() or c=='_' for c in name): + raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name) + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a keyword: %r' % name) + if name[0].isdigit(): + raise ValueError('Type names and field names cannot start with a number: %r' % name) + seen = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: %r' % name) + if name in seen: + raise ValueError('Encountered duplicate field name: %r' % name) + seen.add(name) + + # Fill-in the class template + class_definition = _class_template.format( + typename = typename, + field_names = tuple(field_names), + num_fields = len(field_names), + arg_list = repr(tuple(field_names)).replace("'", "")[1:-1], + repr_fmt = ', '.join(_repr_template.format(name=name) for name in field_names), + field_defs = '\n'.join(_field_template.format(index=index, name=name) + for index, name in enumerate(field_names)) + ) + + # Execute the template string in a temporary namespace and + # support tracing utilities by setting a value for frame.f_globals['__name__'] + namespace = dict(__name__='namedtuple_%s' % typename) + try: + exec(class_definition, namespace) + except SyntaxError as e: + raise SyntaxError(e.msg + ':\n\n' + class_definition) + result = namespace[typename] + if verbose: + print(class_definition) + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in enviroments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return result + + +######################################################################## +### Counter +######################################################################## + +def _count_elements(mapping, iterable): + 'Tally elements from the iterable.' + mapping_get = mapping.get + for elem in iterable: + mapping[elem] = mapping_get(elem, 0) + 1 + +try: # Load C helper function if available + from _collections import _count_elements +except ImportError: + pass + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. + + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(self, iterable=None, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + super().__init__() + self.update(iterable, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. + + >>> Counter('abcdeabcdabcaba').most_common(3) + [('a', 5), ('b', 4), ('c', 3)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.items(), key=_itemgetter(1), reverse=True) + return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. + return _chain.from_iterable(_starmap(_repeat, self.items())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because setting v=1 + # means that no element can have a count greater than one. + raise NotImplementedError( + 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') + + def update(self, iterable=None, **kwds): + '''Like dict.update() but add counts instead of replacing them. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if iterable is not None: + if isinstance(iterable, Mapping): + if self: + self_get = self.get + for elem, count in iterable.items(): + self[elem] = count + self_get(elem, 0) + else: + super().update(iterable) # fast path when counter is empty + else: + _count_elements(self, iterable) + if kwds: + self.update(kwds) + + def subtract(self, iterable=None, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if iterable is not None: + self_get = self.get + if isinstance(iterable, Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' + return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' + if elem in self: + super().__delitem__(elem) + + def __repr__(self): + if not self: + return '%s()' % self.__class__.__name__ + try: + items = ', '.join(map('%r: %r'.__mod__, self.most_common())) + return '%s({%s})' % (self.__class__.__name__, items) + except TypeError: + # handle case where values are not orderable + return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + + def __add__(self, other): + '''Add counts from two counters. + + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. + + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + +######################################################################## +### ChainMap (helper for configparser) +######################################################################## + +class _ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + + +################################################################################ +### UserDict +################################################################################ + +class UserDict(MutableMapping): + + # Start by filling-out the abstract methods + def __init__(self, dict=None, **kwargs): + self.data = {} + if dict is not None: + self.update(dict) + if len(kwargs): + self.update(kwargs) + def __len__(self): return len(self.data) + def __getitem__(self, key): + if key in self.data: + return self.data[key] + if hasattr(self.__class__, "__missing__"): + return self.__class__.__missing__(self, key) + raise KeyError(key) + def __setitem__(self, key, item): self.data[key] = item + def __delitem__(self, key): del self.data[key] + def __iter__(self): + return iter(self.data) + + # Modify __contains__ to work correctly when __missing__ is present + def __contains__(self, key): + return key in self.data + + # Now, add the methods in dicts but not in MutableMapping + def __repr__(self): return repr(self.data) + def copy(self): + if self.__class__ is UserDict: + return UserDict(self.data.copy()) + import copy + data = self.data + try: + self.data = {} + c = copy.copy(self) + finally: + self.data = data + c.update(self) + return c + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + + +################################################################################ +### UserList +################################################################################ + +class UserList(MutableSequence): + """A more or less complete user-defined wrapper around list objects.""" + def __init__(self, initlist=None): + self.data = [] + if initlist is not None: + # XXX should this accept an arbitrary sequence? + if type(initlist) == type(self.data): + self.data[:] = initlist + elif isinstance(initlist, UserList): + self.data[:] = initlist.data[:] + else: + self.data = list(initlist) + def __repr__(self): return repr(self.data) + def __lt__(self, other): return self.data < self.__cast(other) + def __le__(self, other): return self.data <= self.__cast(other) + def __eq__(self, other): return self.data == self.__cast(other) + def __ne__(self, other): return self.data != self.__cast(other) + def __gt__(self, other): return self.data > self.__cast(other) + def __ge__(self, other): return self.data >= self.__cast(other) + def __cast(self, other): + return other.data if isinstance(other, UserList) else other + def __contains__(self, item): return item in self.data + def __len__(self): return len(self.data) + def __getitem__(self, i): return self.data[i] + def __setitem__(self, i, item): self.data[i] = item + def __delitem__(self, i): del self.data[i] + def __add__(self, other): + if isinstance(other, UserList): + return self.__class__(self.data + other.data) + elif isinstance(other, type(self.data)): + return self.__class__(self.data + other) + return self.__class__(self.data + list(other)) + def __radd__(self, other): + if isinstance(other, UserList): + return self.__class__(other.data + self.data) + elif isinstance(other, type(self.data)): + return self.__class__(other + self.data) + return self.__class__(list(other) + self.data) + def __iadd__(self, other): + if isinstance(other, UserList): + self.data += other.data + elif isinstance(other, type(self.data)): + self.data += other + else: + self.data += list(other) + return self + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __imul__(self, n): + self.data *= n + return self + def append(self, item): self.data.append(item) + def insert(self, i, item): self.data.insert(i, item) + def pop(self, i=-1): return self.data.pop(i) + def remove(self, item): self.data.remove(item) + def count(self, item): return self.data.count(item) + def index(self, item, *args): return self.data.index(item, *args) + def reverse(self): self.data.reverse() + def sort(self, *args, **kwds): self.data.sort(*args, **kwds) + def extend(self, other): + if isinstance(other, UserList): + self.data.extend(other.data) + else: + self.data.extend(other) + + + +################################################################################ +### UserString +################################################################################ + +class UserString(Sequence): + def __init__(self, seq): + if isinstance(seq, str): + self.data = seq + elif isinstance(seq, UserString): + self.data = seq.data[:] + else: + self.data = str(seq) + def __str__(self): return str(self.data) + def __repr__(self): return repr(self.data) + def __int__(self): return int(self.data) + def __float__(self): return float(self.data) + def __complex__(self): return complex(self.data) + def __hash__(self): return hash(self.data) + + def __eq__(self, string): + if isinstance(string, UserString): + return self.data == string.data + return self.data == string + def __ne__(self, string): + if isinstance(string, UserString): + return self.data != string.data + return self.data != string + def __lt__(self, string): + if isinstance(string, UserString): + return self.data < string.data + return self.data < string + def __le__(self, string): + if isinstance(string, UserString): + return self.data <= string.data + return self.data <= string + def __gt__(self, string): + if isinstance(string, UserString): + return self.data > string.data + return self.data > string + def __ge__(self, string): + if isinstance(string, UserString): + return self.data >= string.data + return self.data >= string + + def __contains__(self, char): + if isinstance(char, UserString): + char = char.data + return char in self.data + + def __len__(self): return len(self.data) + def __getitem__(self, index): return self.__class__(self.data[index]) + def __add__(self, other): + if isinstance(other, UserString): + return self.__class__(self.data + other.data) + elif isinstance(other, str): + return self.__class__(self.data + other) + return self.__class__(self.data + str(other)) + def __radd__(self, other): + if isinstance(other, str): + return self.__class__(other + self.data) + return self.__class__(str(other) + self.data) + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __mod__(self, args): + return self.__class__(self.data % args) + + # the following methods are defined in alphabetical order: + def capitalize(self): return self.__class__(self.data.capitalize()) + def center(self, width, *args): + return self.__class__(self.data.center(width, *args)) + def count(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.count(sub, start, end) + def encode(self, encoding=None, errors=None): # XXX improve this? + if encoding: + if errors: + return self.__class__(self.data.encode(encoding, errors)) + return self.__class__(self.data.encode(encoding)) + return self.__class__(self.data.encode()) + def endswith(self, suffix, start=0, end=_sys.maxsize): + return self.data.endswith(suffix, start, end) + def expandtabs(self, tabsize=8): + return self.__class__(self.data.expandtabs(tabsize)) + def find(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.find(sub, start, end) + def format(self, *args, **kwds): + return self.data.format(*args, **kwds) + def index(self, sub, start=0, end=_sys.maxsize): + return self.data.index(sub, start, end) + def isalpha(self): return self.data.isalpha() + def isalnum(self): return self.data.isalnum() + def isdecimal(self): return self.data.isdecimal() + def isdigit(self): return self.data.isdigit() + def isidentifier(self): return self.data.isidentifier() + def islower(self): return self.data.islower() + def isnumeric(self): return self.data.isnumeric() + def isspace(self): return self.data.isspace() + def istitle(self): return self.data.istitle() + def isupper(self): return self.data.isupper() + def join(self, seq): return self.data.join(seq) + def ljust(self, width, *args): + return self.__class__(self.data.ljust(width, *args)) + def lower(self): return self.__class__(self.data.lower()) + def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) + def partition(self, sep): + return self.data.partition(sep) + def replace(self, old, new, maxsplit=-1): + if isinstance(old, UserString): + old = old.data + if isinstance(new, UserString): + new = new.data + return self.__class__(self.data.replace(old, new, maxsplit)) + def rfind(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.rfind(sub, start, end) + def rindex(self, sub, start=0, end=_sys.maxsize): + return self.data.rindex(sub, start, end) + def rjust(self, width, *args): + return self.__class__(self.data.rjust(width, *args)) + def rpartition(self, sep): + return self.data.rpartition(sep) + def rstrip(self, chars=None): + return self.__class__(self.data.rstrip(chars)) + def split(self, sep=None, maxsplit=-1): + return self.data.split(sep, maxsplit) + def rsplit(self, sep=None, maxsplit=-1): + return self.data.rsplit(sep, maxsplit) + def splitlines(self, keepends=0): return self.data.splitlines(keepends) + def startswith(self, prefix, start=0, end=_sys.maxsize): + return self.data.startswith(prefix, start, end) + def strip(self, chars=None): return self.__class__(self.data.strip(chars)) + def swapcase(self): return self.__class__(self.data.swapcase()) + def title(self): return self.__class__(self.data.title()) + def translate(self, *args): + return self.__class__(self.data.translate(*args)) + def upper(self): return self.__class__(self.data.upper()) + def zfill(self, width): return self.__class__(self.data.zfill(width)) + + + +################################################################################ +### Simple tests +################################################################################ + +if __name__ == '__main__': + # verify that instances can be pickled + from pickle import loads, dumps + Point = namedtuple('Point', 'x, y', True) + p = Point(x=10, y=20) + assert p == loads(dumps(p)) + + # test and demonstrate ability to override methods + class Point(namedtuple('Point', 'x y')): + __slots__ = () + @property + def hypot(self): + return (self.x ** 2 + self.y ** 2) ** 0.5 + def __str__(self): + return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot) + + for p in Point(3, 4), Point(14, 5/7.): + print (p) + + class Point(namedtuple('Point', 'x y')): + 'Point class with optimized _make() and _replace() without error-checking' + __slots__ = () + _make = classmethod(tuple.__new__) + def _replace(self, _map=map, **kwds): + return self._make(_map(kwds.get, ('x', 'y'), self)) + + print(Point(11, 22)._replace(x=100)) + + Point3D = namedtuple('Point3D', Point._fields + ('z',)) + print(Point3D.__doc__) + + import doctest + TestResults = namedtuple('TestResults', 'failed attempted') + print(TestResults(*doctest.testmod())) diff --git a/lib-python/3/curses/wrapper.py b/lib-python/3/curses/wrapper.py new file mode 100644 --- /dev/null +++ b/lib-python/3/curses/wrapper.py @@ -0,0 +1,50 @@ +"""curses.wrapper + +Contains one function, wrapper(), which runs another function which +should be the rest of your curses-based application. If the +application raises an exception, wrapper() will restore the terminal +to a sane state so you can read the resulting traceback. + +""" + +import curses + +def wrapper(func, *args, **kwds): + """Wrapper function that initializes curses and calls another function, + restoring normal keyboard/screen behavior on error. + The callable object 'func' is then passed the main window 'stdscr' + as its first argument, followed by any other arguments passed to + wrapper(). + """ + + try: + # Initialize curses + stdscr = curses.initscr() + + # Turn off echoing of keys, and enter cbreak mode, + # where no buffering is performed on keyboard input + curses.noecho() + curses.cbreak() + + # In keypad mode, escape sequences for special keys + # (like the cursor keys) will be interpreted and + # a special value like curses.KEY_LEFT will be returned + stdscr.keypad(1) + + # Start color, too. Harmless if the terminal doesn't have + # color; user can test with has_color() later on. The try/catch + # works around a minor bit of over-conscientiousness in the curses + # module -- the error return from C start_color() is ignorable. + try: + curses.start_color() + except: + pass + + return func(stdscr, *args, **kwds) + finally: + # Set everything back to normal + if 'stdscr' in locals(): + stdscr.keypad(0) + curses.echo() + curses.nocbreak() + curses.endwin() diff --git a/lib-python/3/email/test/__init__.py b/lib-python/3/email/test/__init__.py new file mode 100644 diff --git a/lib-python/3/email/test/data/PyBanner048.gif b/lib-python/3/email/test/data/PyBanner048.gif new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..1a5c87f647fbf33e5b46103119c9fd42afbe9e5d GIT binary patch [cut] diff --git a/lib-python/3/email/test/data/audiotest.au b/lib-python/3/email/test/data/audiotest.au new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f76b0501b8c61b4fabbb3715b69a9434a42469cb GIT binary patch [cut] diff --git a/lib-python/3/email/test/data/msg_01.txt b/lib-python/3/email/test/data/msg_01.txt new file mode 100644 --- /dev/null +++ b/lib-python/3/email/test/data/msg_01.txt @@ -0,0 +1,19 @@ +Return-Path: +Delivered-To: bbb at zzz.org +Received: by mail.zzz.org (Postfix, from userid 889) + id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT) +MIME-Version: 1.0 From pypy.commits at gmail.com Tue Jan 12 22:24:25 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:25 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Revert change to cffi backend test. Message-ID: <5695c369.42b81c0a.efddf.24eb@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81717:ecb018d6001d Date: 2016-01-10 18:17 -0500 http://bitbucket.org/pypy/pypy/changeset/ecb018d6001d/ Log: Revert change to cffi backend test. diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.3.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): From pypy.commits at gmail.com Tue Jan 12 22:24:26 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:26 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Add test to see if sys.exceptionhook is used when outputting errors in code.runsource Message-ID: <5695c36a.cf821c0a.f83f5.28eb@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81718:dc63814c5a2d Date: 2016-01-10 19:35 -0500 http://bitbucket.org/pypy/pypy/changeset/dc63814c5a2d/ Log: Add test to see if sys.exceptionhook is used when outputting errors in code.runsource diff --git a/pypy/module/test_lib_pypy/test_code_module.py b/pypy/module/test_lib_pypy/test_code_module.py --- a/pypy/module/test_lib_pypy/test_code_module.py +++ b/pypy/module/test_lib_pypy/test_code_module.py @@ -49,3 +49,17 @@ """ assert expected_header in result assert result.endswith("NameError: name '_diana_' is not defined\n") + + def test_excepthook(self): + interp = self.get_interp() + interp.runsource("import sys") + print(interp.runsource(""" +def ignore_failure(type, value, traceback): + pass +""")) + print(interp.runsource("sys.excepthook = ignore_failure")) + print(interp.runsource("raise TypeError('Invalid Type')")) + result = interp.out.getvalue() + # Since we have a custom excepthook, the write() method should not + # be called + assert result == "" From pypy.commits at gmail.com Tue Jan 12 22:24:28 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:28 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Get rid of debugging. Message-ID: <5695c36c.4a5ec20a.af75e.fffff281@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81719:8a7fa79cdf6a Date: 2016-01-10 19:37 -0500 http://bitbucket.org/pypy/pypy/changeset/8a7fa79cdf6a/ Log: Get rid of debugging. diff --git a/pypy/module/test_lib_pypy/test_code_module.py b/pypy/module/test_lib_pypy/test_code_module.py --- a/pypy/module/test_lib_pypy/test_code_module.py +++ b/pypy/module/test_lib_pypy/test_code_module.py @@ -53,13 +53,13 @@ def test_excepthook(self): interp = self.get_interp() interp.runsource("import sys") - print(interp.runsource(""" + interp.runsource(""" def ignore_failure(type, value, traceback): pass -""")) - print(interp.runsource("sys.excepthook = ignore_failure")) - print(interp.runsource("raise TypeError('Invalid Type')")) +""") + interp.runsource("sys.excepthook = ignore_failure") + interp.runsource("raise TypeError('Invalid Type')") result = interp.out.getvalue() # Since we have a custom excepthook, the write() method should not - # be called + # be called, so out should never have been written to. assert result == "" From pypy.commits at gmail.com Tue Jan 12 22:24:30 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:30 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Make the diff from the stdlib smaller. Message-ID: <5695c36e.c74fc20a.c7529.04b1@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81720:f1050dffd9d5 Date: 2016-01-10 21:09 -0500 http://bitbucket.org/pypy/pypy/changeset/f1050dffd9d5/ Log: Make the diff from the stdlib smaller. diff --git a/lib-python/3/code.py b/lib-python/3/code.py --- a/lib-python/3/code.py +++ b/lib-python/3/code.py @@ -105,9 +105,10 @@ The output is written by self.write(), below. """ - type, value, sys.last_traceback = sys.exc_info() + type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value + sys.last_traceback = tb if filename and type is SyntaxError: # Work hard to stuff the correct filename in the exception try: @@ -125,7 +126,7 @@ else: # If someone has set sys.excepthook, we let that take precedence # over self.write - sys.excepthook(type, value, self.last_traceback) + sys.excepthook(type, value, tb) def showtraceback(self): """Display the exception that just occurred. From pypy.commits at gmail.com Tue Jan 12 22:24:31 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:31 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Revert unnecessary change to frozen importlib. The changes to the general pickle module make this no longer needed. Message-ID: <5695c36f.2a06c20a.83684.ffffa9d0@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81721:52ae5b33f67d Date: 2016-01-10 22:00 -0500 http://bitbucket.org/pypy/pypy/changeset/52ae5b33f67d/ Log: Revert unnecessary change to frozen importlib. The changes to the general pickle module make this no longer needed. diff --git a/pypy/module/_frozen_importlib/__init__.py b/pypy/module/_frozen_importlib/__init__.py --- a/pypy/module/_frozen_importlib/__init__.py +++ b/pypy/module/_frozen_importlib/__init__.py @@ -30,7 +30,7 @@ space.wrap(space.builtin)) code_w.exec_code(space, self.w_dict, self.w_dict) - self.w_import = space.wrap(interp_import.__import__) + self.w_import = space.wrap(interp_import.import_with_frames_removed) def startup(self, space): """Copy our __import__ to builtins.""" diff --git a/pypy/module/_frozen_importlib/interp_import.py b/pypy/module/_frozen_importlib/interp_import.py --- a/pypy/module/_frozen_importlib/interp_import.py +++ b/pypy/module/_frozen_importlib/interp_import.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError @interp2app -def __import__(space, __args__): +def import_with_frames_removed(space, __args__): try: return space.call_args( space.getbuiltinmodule('_frozen_importlib').getdictvalue( From pypy.commits at gmail.com Tue Jan 12 22:24:33 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:33 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Remove unnecessary comments. Message-ID: <5695c371.4f911c0a.e123f.24de@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81722:26a69bf1f021 Date: 2016-01-10 23:02 -0500 http://bitbucket.org/pypy/pypy/changeset/26a69bf1f021/ Log: Remove unnecessary comments. diff --git a/pypy/module/__pypy__/test/test_stderrprinter.py b/pypy/module/__pypy__/test/test_stderrprinter.py --- a/pypy/module/__pypy__/test/test_stderrprinter.py +++ b/pypy/module/__pypy__/test/test_stderrprinter.py @@ -7,9 +7,6 @@ p.close() # this should be a no-op p.flush() # this should be a no-op assert p.fileno() == 2 - # It doesn't make sense to assert this. Stderror could be a tty - # (the terminal) or not, depending on how we are running the tests. - # assert p.isatty() assert p.write('foo') == 3 raises(TypeError, p.write, b'foo') diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -636,8 +636,6 @@ def test_del_from_sys_modules(self): try: import del_sys_module - #This raises a KeyError in cpython, - #not an import error except KeyError: pass # ok else: diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -32,7 +32,6 @@ w_sys.flush_std_files(space) msg = space.bytes_w(space.call_function(w_read)) - # IOError has become an alias for OSError assert 'Exception OSError' in msg finally: space.setattr(w_sys, space.wrap('stdout'), w_sys.get('__stdout__')) diff --git a/pypy/module/test_lib_pypy/test_code_module.py b/pypy/module/test_lib_pypy/test_code_module.py --- a/pypy/module/test_lib_pypy/test_code_module.py +++ b/pypy/module/test_lib_pypy/test_code_module.py @@ -21,8 +21,6 @@ def test_cause_tb(self): interp = self.get_interp() - # (Arbitrarily) Changing to TypeError as IOError is now an alias of - # OSError, making testing confusing interp.runsource('raise TypeError from OSError') result = interp.out.getvalue() expected_header = """OSError From pypy.commits at gmail.com Tue Jan 12 22:24:35 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:35 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Turn set(char) into set([char]) as per review. Message-ID: <5695c373.cdb81c0a.ad17d.fffff0c2@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81723:585cb9f851a0 Date: 2016-01-10 23:48 -0500 http://bitbucket.org/pypy/pypy/changeset/585cb9f851a0/ Log: Turn set(char) into set([char]) as per review. diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -397,7 +397,7 @@ seen = next(diter) pckl = pickle.dumps(diter) result = pickle.loads(pckl) - assert set(result) == (set('235') - set(seen)) + assert set(result) == (set('235') - set([seen])) def test_pickle_reversed(self): import pickle From pypy.commits at gmail.com Tue Jan 12 22:24:36 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:36 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Cleanup handling of __loader__ in test_mod Message-ID: <5695c374.0f811c0a.80258.ffffe78c@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81724:6efa96d1fbb3 Date: 2016-01-10 23:48 -0500 http://bitbucket.org/pypy/pypy/changeset/6efa96d1fbb3/ Log: Cleanup handling of __loader__ in test_mod diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -391,18 +391,13 @@ def test_mod(self): import _io, _frozen_importlib - typemods = dict((t, t.__module__) for t in vars(_io).values() - if isinstance(t, type)) + typemods = dict((t, t.__module__) for name, t in vars(_io).items() + if isinstance(t, type) and name != '__loader__') for t, mod in typemods.items(): if t is _io.BlockingIOError: assert mod == 'builtins' elif t is _io.UnsupportedOperation: assert mod == 'io' - #TODO: Make sure this is a reasonable thing to do. Check if there is - #a cleaner way to do these checks or if these checks even make sense - #in general. They seem really brittle. - elif t is _frozen_importlib.BuiltinImporter: - assert mod == "_frozen_importlib" else: assert mod == '_io' From pypy.commits at gmail.com Tue Jan 12 22:24:38 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:38 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Cleanup newline in test_import_lock to match base stdlib. Message-ID: <5695c376.cb571c0a.75fb7.ffffee32@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81725:d2d4bbff12e4 Date: 2016-01-10 23:49 -0500 http://bitbucket.org/pypy/pypy/changeset/d2d4bbff12e4/ Log: Cleanup newline in test_import_lock to match base stdlib. diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py --- a/pypy/module/thread/test/test_import_lock.py +++ b/pypy/module/thread/test/test_import_lock.py @@ -96,8 +96,8 @@ original_acquire() importlock.count = 0 monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock) + # An already imported module - importhook(space, 'sys') assert importlock.count == 0 # A new module From pypy.commits at gmail.com Tue Jan 12 22:24:41 2016 From: pypy.commits at gmail.com (marky1991) Date: Tue, 12 Jan 2016 19:24:41 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Remove unused frozen_importlib import. Message-ID: <5695c379.4473c20a.ddf72.ffffebce@mx.google.com> Author: marky1991 Branch: py3.3 Changeset: r81726:c6dfbae07193 Date: 2016-01-11 00:09 -0500 http://bitbucket.org/pypy/pypy/changeset/c6dfbae07193/ Log: Remove unused frozen_importlib import. diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -390,7 +390,7 @@ raises(TypeError, pickle.dumps, f, protocol) def test_mod(self): - import _io, _frozen_importlib + import _io typemods = dict((t, t.__module__) for name, t in vars(_io).items() if isinstance(t, type) and name != '__loader__') for t, mod in typemods.items(): From pypy.commits at gmail.com Tue Jan 12 22:45:35 2016 From: pypy.commits at gmail.com (mjacob) Date: Tue, 12 Jan 2016 19:45:35 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Implement gc_thread_run() operation. Message-ID: <5695c85f.cb941c0a.4fbee.ffffc5fa@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81727:323d6dc97d07 Date: 2016-01-13 01:28 +0100 http://bitbucket.org/pypy/pypy/changeset/323d6dc97d07/ Log: Implement gc_thread_run() operation. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1480,6 +1480,9 @@ else: assert False, "No subop {}".format(subopnum.value) + def op_gc_thread_run(self, result): + self.op_direct_call(result, get_repr(rpy_threadlocalref_ensure)) + def op_gc_thread_die(self, result): self.op_direct_call(result, get_repr(rpy_tls_thread_die)) @@ -1752,6 +1755,8 @@ lltype.SignedLongLong, eci) rpy_tls_program_init = extfunc('RPython_ThreadLocals_ProgramInit', [], lltype.Void, eci) +rpy_threadlocalref_ensure = extfunc('RPY_THREADLOCALREF_ENSURE', [], + lltype.Void, eci) rpy_tls_thread_die = extfunc('RPython_ThreadLocals_ThreadDie', [], lltype.Void, eci) del eci From pypy.commits at gmail.com Tue Jan 12 22:45:37 2016 From: pypy.commits at gmail.com (mjacob) Date: Tue, 12 Jan 2016 19:45:37 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Implement a LLVM plugin pass which internalizes hidden symbols. Message-ID: <5695c861.4473c20a.ddf72.ffffeef6@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81728:0edd0ee170a8 Date: 2016-01-13 02:16 +0100 http://bitbucket.org/pypy/pypy/changeset/0edd0ee170a8/ Log: Implement a LLVM plugin pass which internalizes hidden symbols. "Internalizes" means that it'll set internal linkage, which is comparable to adding the static keyword in C. This should be correct because the LLVM backend links all source files into one module before compiling. diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -95,4 +95,4 @@ ^.git/ ^release/ ^rpython/_cache$ -^rpython/translator/llvm/PyPyGC.so$ +^rpython/translator/llvm/.*\.so$ diff --git a/rpython/translator/llvm/InternalizeHiddenSymbols.cpp b/rpython/translator/llvm/InternalizeHiddenSymbols.cpp new file mode 100644 --- /dev/null +++ b/rpython/translator/llvm/InternalizeHiddenSymbols.cpp @@ -0,0 +1,47 @@ +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Module.h" +#include "llvm/Transforms/IPO/PassManagerBuilder.h" + +using namespace llvm; + +namespace { +struct InternalizeHiddenSymbols : public ModulePass { + static char ID; + + InternalizeHiddenSymbols() : ModulePass(ID) {} + + void getAnalysisUsage(AnalysisUsage &AU) const override {} + + bool runOnModule(Module &M) override; + + const char *getPassName() const override { + return "Set internal linkage on hidden symbols."; + } +}; +} + +char InternalizeHiddenSymbols::ID = 0; + +static bool InternalizeIfHidden(GlobalValue &GV) { + if (GV.getVisibility() != GlobalValue::HiddenVisibility) + return false; + GV.setLinkage(GlobalValue::InternalLinkage); + return true; +} + +bool InternalizeHiddenSymbols::runOnModule(Module &M) { + bool Changed = false; + + for (auto &GV : M.globals()) + Changed |= InternalizeIfHidden(GV); + for (auto &F : M.functions()) + Changed |= InternalizeIfHidden(F); + + return Changed; +} + +static RegisterStandardPasses RegisterMyPass( + PassManagerBuilder::EP_ModuleOptimizerEarly, + [](const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { + PM.add(new InternalizeHiddenSymbols()); + }); diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1953,7 +1953,9 @@ # optimize this module optimized_file = self.work_dir.join('output_optimized.' + ('ll' if llvm_assembly else 'bc')) - opt_args = ['opt', '-O3', linked_file, '-o', optimized_file] + opt_args = ['opt', '-load', + self._compile_llvm_plugin('InternalizeHiddenSymbols.cpp'), + '-O3', linked_file, '-o', optimized_file] self._execute(opt_args + (['-S'] if llvm_assembly else [])) # compile object file @@ -1984,14 +1986,14 @@ self._execute(link_args + [object_file, '-o', output_file]) return output_file - def _compile_llvmgcroot(self): + def _compile_llvm_plugin(self, relative_filename): this_file = local(__file__) - gc_cpp = this_file.new(basename='PyPyGC.cpp') - gc_lib = this_file.new(purebasename='PyPyGC', - ext=self.translator.platform.so_ext) + plugin_cpp = this_file.new(basename=relative_filename) + plugin_so = plugin_cpp.new(ext=self.translator.platform.so_ext) cflags = cmdexec('llvm-config --cxxflags').strip() + ' -fno-rtti' - cmdexec('clang {} -shared {} -o {}'.format(cflags, gc_cpp, gc_lib)) - return gc_lib + cmdexec('clang {} -shared {} -o {}'.format(cflags, plugin_cpp, + plugin_so)) + return plugin_so def compile(self, exe_name): return self._compile() From pypy.commits at gmail.com Tue Jan 12 22:45:39 2016 From: pypy.commits at gmail.com (mjacob) Date: Tue, 12 Jan 2016 19:45:39 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Set visibility of call wrappers to 'hidden'. Message-ID: <5695c863.034cc20a.16a73.fffff7c8@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81729:648a0d5d4bf2 Date: 2016-01-13 02:35 +0100 http://bitbucket.org/pypy/pypy/changeset/648a0d5d4bf2/ Log: Set visibility of call wrappers to 'hidden'. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -704,7 +704,7 @@ else: wrapper_name, source = rffi._write_call_wrapper( obj._name, database.unique_name(obj._name, False), - obj._TYPE) + obj._TYPE, 'RPY_EXTERN ') name = '@' + wrapper_name database.genllvm.sources.append(source) From pypy.commits at gmail.com Tue Jan 12 22:45:41 2016 From: pypy.commits at gmail.com (mjacob) Date: Tue, 12 Jan 2016 19:45:41 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix translation. Message-ID: <5695c865.8f7e1c0a.cf222.2967@mx.google.com> Author: Manuel Jacob Branch: py3.3 Changeset: r81730:26dd3a53c0c4 Date: 2016-01-13 04:44 +0100 http://bitbucket.org/pypy/pypy/changeset/26dd3a53c0c4/ Log: Fix translation. I think pickling of enumerate objects is still a bit broken. I'll verify after translation. diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -269,7 +269,7 @@ def descr___reduce__(self, space): return space.newtuple([space.type(self), - space.newtuple([self.w_iter, self.w_index])]) + space.newtuple([self.w_iter_or_list, self.w_index])]) W_Enumerate.typedef = TypeDef("enumerate", __new__=interp2app(W_Enumerate.descr___new__), From pypy.commits at gmail.com Tue Jan 12 23:42:15 2016 From: pypy.commits at gmail.com (sbauman) Date: Tue, 12 Jan 2016 20:42:15 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Remove '_pure' suffix from getfield operations (moving it to descr) Message-ID: <5695d5a7.e16ec20a.f0264.ffffd587@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81731:94c0a9980079 Date: 2016-01-12 12:22 -0500 http://bitbucket.org/pypy/pypy/changeset/94c0a9980079/ Log: Remove '_pure' suffix from getfield operations (moving it to descr) diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -188,7 +188,6 @@ else: res = "" is_guard = op.is_guard() - is_pure = "" if op.getdescr() is not None: descr = op.getdescr() if is_guard and self.guard_number: @@ -196,8 +195,6 @@ r = "" % hash else: r = self.repr_of_descr(descr) - if op.is_getfield() and op.is_always_pure(): - is_pure = "_pure" if args: args += ', descr=' + r else: @@ -207,7 +204,7 @@ for arg in op.getfailargs()]) + ']' else: fail_args = '' - return s_offset + res + op.getopname() + is_pure + '(' + args + ')' + fail_args + return s_offset + res + op.getopname() + '(' + args + ')' + fail_args def _log_operations(self, inputargs, operations, ops_offset=None, From pypy.commits at gmail.com Tue Jan 12 23:42:17 2016 From: pypy.commits at gmail.com (sbauman) Date: Tue, 12 Jan 2016 20:42:17 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Put purity information of field into repr of its descriptor Message-ID: <5695d5a9.ca061c0a.f98cf.fffff8df@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81732:f4e32df544ed Date: 2016-01-12 12:49 -0500 http://bitbucket.org/pypy/pypy/changeset/f4e32df544ed/ Log: Put purity information of field into repr of its descriptor diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -180,7 +180,8 @@ return self.offset def repr_of_descr(self): - return '' % (self.flag, self.name, self.offset) + ispure = " pure" if self._is_pure else "" + return '' % (self.flag, self.name, self.offset, ispure) def get_parent_descr(self): return self.parent_descr From pypy.commits at gmail.com Tue Jan 12 23:42:19 2016 From: pypy.commits at gmail.com (sbauman) Date: Tue, 12 Jan 2016 20:42:19 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Merge with default Message-ID: <5695d5ab.25fac20a.12430.4ceb@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81733:fd2fff063c7e Date: 2016-01-12 14:47 -0500 http://bitbucket.org/pypy/pypy/changeset/fd2fff063c7e/ Log: Merge with default diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -39,5 +39,5 @@ # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html -cffi_imports: +cffi_imports: pypy-c PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -372,7 +372,7 @@ def arg_int_w(self, w_obj, minimum, errormsg): space = self.space try: - result = space.int_w(w_obj) + result = space.int_w(space.int(w_obj)) # CPython allows floats as parameters except OperationError, e: if e.async(space): raise diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -225,6 +225,12 @@ assert it.next() == x raises(StopIteration, it.next) + # CPython implementation allows floats + it = itertools.islice([1, 2, 3, 4, 5], 0.0, 3.0, 2.0) + for x in [1, 3]: + assert it.next() == x + raises(StopIteration, it.next) + it = itertools.islice([1, 2, 3], 0, None) for x in [1, 2, 3]: assert it.next() == x diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -330,15 +330,14 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_external_call(self, op, seen=None): + def analyze_external_call(self, funcobj, seen=None): try: - funcobj = op.args[0].value._obj if funcobj.random_effects_on_gcobjs: return True - except (AttributeError, lltype.DelayedPointer): + except AttributeError: return True # better safe than sorry return super(RandomEffectsAnalyzer, self).analyze_external_call( - op, seen) + funcobj, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -21,7 +21,10 @@ pass -class CachedField(object): +class AbstractCachedEntry(object): + """ abstract base class abstracting over the difference between caching + struct fields and array items. """ + def __init__(self): # Cache information for a field descr, or for an (array descr, index) # pair. It can be in one of two states: @@ -39,19 +42,15 @@ self.cached_infos = [] self.cached_structs = [] self._lazy_setfield = None - self._lazy_setfield_registered = False - def register_dirty_field(self, structop, info): + def register_info(self, structop, info): + # invariant: every struct or array ptr info, that is not virtual and + # that has a non-None entry at + # info._fields[descr.get_index()] + # must be in cache_infos self.cached_structs.append(structop) self.cached_infos.append(info) - def invalidate(self, descr): - for opinfo in self.cached_infos: - assert isinstance(opinfo, info.AbstractStructPtrInfo) - opinfo._fields[descr.get_index()] = None - self.cached_infos = [] - self.cached_structs = [] - def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): assert self._lazy_setfield is None @@ -72,7 +71,7 @@ def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. structinfo = optheap.ensure_ptr_info_arg0(op) - arg1 = optheap.get_box_replacement(self._getvalue(op)) + arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): self.force_lazy_setfield(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) @@ -87,11 +86,8 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield, and register - # myself in the optheap's _lazy_setfields_and_arrayitems list + # common case: store the 'op' as lazy_setfield self._lazy_setfield = op - #if not self._lazy_setfield_registered: - # self._lazy_setfield_registered = True else: # this is the case where the pending setfield ends up @@ -111,25 +107,13 @@ self.force_lazy_setfield(optheap, descr) if self._lazy_setfield is not None: op = self._lazy_setfield - return optheap.get_box_replacement(self._getvalue(op)) + return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) if res is not None: return res.get_box_replacement() return None - def _getvalue(self, op): - return op.getarg(1) - - def _getfield(self, opinfo, descr, optheap, true_force=True): - res = opinfo.getfield(descr, optheap) - if isinstance(res, PreambleOp): - if not true_force: - return res.op - res = optheap.optimizer.force_op_from_preamble(res) - opinfo.setfield(descr, None, res, optheap) - return res - def force_lazy_setfield(self, optheap, descr, can_cache=True): op = self._lazy_setfield if op is not None: @@ -151,25 +135,74 @@ # back in the cache: the value of this particular structure's # field. opinfo = optheap.ensure_ptr_info_arg0(op) - self._setfield(op, opinfo, optheap) + self.put_field_back_to_info(op, opinfo, optheap) elif not can_cache: self.invalidate(descr) - def _setfield(self, op, opinfo, optheap): + + # abstract methods + + def _get_rhs_from_set_op(self, op): + """ given a set(field or arrayitem) op, return the rhs argument """ + raise NotImplementedError("abstract method") + + def put_field_back_to_info(self, op, opinfo, optheap): + """ this method is called just after a lazy setfield was ommitted. it + puts the information of the lazy setfield back into the proper cache in + the info. """ + raise NotImplementedError("abstract method") + + def _getfield(self, opinfo, descr, optheap, true_force=True): + raise NotImplementedError("abstract method") + + def invalidate(self, descr): + """ clear all the cached knowledge in the infos in self.cached_infos. + """ + raise NotImplementedError("abstract method") + + +class CachedField(AbstractCachedEntry): + def _get_rhs_from_set_op(self, op): + return op.getarg(1) + + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(1)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setfield(op.getdescr(), struct, arg, optheap, self) + opinfo.setfield(op.getdescr(), struct, arg, optheap=optheap, cf=self) -class ArrayCachedField(CachedField): + def _getfield(self, opinfo, descr, optheap, true_force=True): + res = opinfo.getfield(descr, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.AbstractStructPtrInfo): + assert opinfo in self.cached_infos + if isinstance(res, PreambleOp): + if not true_force: + return res.op + res = optheap.optimizer.force_op_from_preamble(res) + opinfo.setfield(descr, None, res, optheap=optheap) + return res + + def invalidate(self, descr): + for opinfo in self.cached_infos: + assert isinstance(opinfo, info.AbstractStructPtrInfo) + opinfo._fields[descr.get_index()] = None + self.cached_infos = [] + self.cached_structs = [] + + +class ArrayCachedItem(AbstractCachedEntry): def __init__(self, index): self.index = index - CachedField.__init__(self) + AbstractCachedEntry.__init__(self) - def _getvalue(self, op): + def _get_rhs_from_set_op(self, op): return op.getarg(2) def _getfield(self, opinfo, descr, optheap, true_force=True): res = opinfo.getitem(descr, self.index, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.ArrayPtrInfo): + assert opinfo in self.cached_infos if (isinstance(res, PreambleOp) and optheap.optimizer.cpu.supports_guard_gc_type): if not true_force: @@ -179,10 +212,10 @@ opinfo.setitem(descr, index, None, res, optheap=optheap) return res - def _setfield(self, op, opinfo, optheap): + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(2)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setitem(op.getdescr(), self.index, struct, arg, self, optheap) + opinfo.setitem(op.getdescr(), self.index, struct, arg, optheap=optheap, cf=self) def invalidate(self, descr): for opinfo in self.cached_infos: @@ -201,15 +234,11 @@ self.postponed_op = None - # XXXX the rest is old - # cached array items: {array descr: {index: CachedField}} - #self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} # cache of corresponding {array descrs: dict 'entries' field descr} self.corresponding_array_descrs = {} # - self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False @@ -234,7 +263,7 @@ descrkeys = self.cached_fields.keys() if not we_are_translated(): # XXX Pure operation of boxes that are cached in several places will - # only be removed from the peeled loop when red from the first + # only be removed from the peeled loop when read from the first # place discovered here. This is far from ideal, as it makes # the effectiveness of our optimization a bit random. It should # howevere always generate correct results. For tests we dont @@ -249,14 +278,7 @@ d.produce_potential_short_preamble_ops(self.optimizer, sb, descr, index) - def register_dirty_field(self, descr, op, info): - self.field_cache(descr).register_dirty_field(op, info) - - def register_dirty_array_field(self, arraydescr, op, index, info): - self.arrayitem_cache(arraydescr, index).register_dirty_field(op, info) - def clean_caches(self): - del self._lazy_setfields_and_arrayitems[:] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) @@ -285,7 +307,7 @@ try: cf = submap[index] except KeyError: - cf = submap[index] = ArrayCachedField(index) + cf = submap[index] = ArrayCachedItem(index) return cf def emit_operation(self, op): @@ -489,7 +511,7 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_setfield(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): @@ -507,7 +529,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, self, cf) + structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I @@ -545,12 +567,12 @@ # default case: produce the operation self.make_nonnull(op.getarg(0)) self.emit_operation(op) - # the remember the result of reading the array item + # then remember the result of reading the array item if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), - self.get_box_replacement(op), cf, - self) + self.get_box_replacement(op), optheap=self, + cf=cf) optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -196,28 +196,28 @@ def all_items(self): return self._fields - def setfield(self, descr, struct, op, optheap=None, cf=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) assert isinstance(op, AbstractValue) - self._fields[descr.get_index()] = op + self._fields[fielddescr.get_index()] = op if cf is not None: assert not self.is_virtual() assert struct is not None - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) - def getfield(self, descr, optheap=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) - return self._fields[descr.get_index()] + def getfield(self, fielddescr, optheap=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) + return self._fields[fielddescr.get_index()] def _force_elements(self, op, optforce, descr): if self._fields is None: return - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETFIELD_GC, [op, subbox], - descr=flddescr) + descr=fielddescr) self._fields[i] = None optforce.emit_operation(setfieldop) @@ -249,16 +249,16 @@ if fieldinfo and fieldinfo.is_virtual(): fieldinfo.visitor_walk_recursive(op, visitor, optimizer) - def produce_short_preamble_ops(self, structbox, descr, index, optimizer, + def produce_short_preamble_ops(self, structbox, fielddescr, index, optimizer, shortboxes): if self._fields is None: return - if descr.get_index() >= len(self._fields): + if fielddescr.get_index() >= len(self._fields): # we don't know about this item return - op = optimizer.get_box_replacement(self._fields[descr.get_index()]) - opnum = OpHelpers.getfield_for_descr(descr) - getfield_op = ResOperation(opnum, [structbox], descr=descr) + op = optimizer.get_box_replacement(self._fields[fielddescr.get_index()]) + opnum = OpHelpers.getfield_for_descr(fielddescr) + getfield_op = ResOperation(opnum, [structbox], descr=fielddescr) shortboxes.add_heap_op(op, getfield_op) def _is_immutable_and_filled_with_constants(self, optimizer, memo=None): @@ -294,12 +294,12 @@ return True def _force_elements_immutable(self, descr, constptr, optforce): - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] subbox = optforce.force_box(fld) assert isinstance(subbox, Const) execute(optforce.optimizer.cpu, None, rop.SETFIELD_GC, - flddescr, constptr, subbox) + fielddescr, constptr, subbox) class InstancePtrInfo(AbstractStructPtrInfo): _attrs_ = ('_known_class',) @@ -505,6 +505,7 @@ info._items = self._items[:] def _force_elements(self, op, optforce, descr): + # XXX descr = op.getdescr() const = optforce.new_const_item(self.descr) for i in range(self.length): @@ -523,7 +524,7 @@ optforce.emit_operation(setop) optforce.pure_from_args(rop.ARRAYLEN_GC, [op], ConstInt(len(self._items))) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): @@ -531,7 +532,7 @@ self._items[index] = op if cf is not None: assert not self.is_virtual() - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) def getitem(self, descr, index, optheap=None): if self._items is None or index >= len(self._items): @@ -626,13 +627,13 @@ i = 0 fielddescrs = op.getdescr().get_all_fielddescrs() for index in range(self.length): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: fld = self._items[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETINTERIORFIELD_GC, [op, ConstInt(index), subbox], - descr=flddescr) + descr=fielddescr) optforce.emit_operation(setfieldop) # heapcache does not work for interiorfields # if it does, we would need a fix here @@ -645,7 +646,7 @@ fielddescrs = self.descr.get_all_fielddescrs() i = 0 for index in range(self.getlength()): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: itemop = self._items[i] if (itemop is not None and not isinstance(itemop, Const)): @@ -691,21 +692,21 @@ optheap.const_infos[ref] = info return info - def getfield(self, descr, optheap=None): - info = self._get_info(descr.get_parent_descr(), optheap) - return info.getfield(descr) + def getfield(self, fielddescr, optheap=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + return info.getfield(fielddescr) def getitem(self, descr, index, optheap=None): info = self._get_array_info(descr, optheap) return info.getitem(descr, index) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): info = self._get_array_info(descr, optheap) - info.setitem(descr, index, struct, op, cf) + info.setitem(descr, index, struct, op, optheap=optheap, cf=cf) - def setfield(self, descr, struct, op, optheap=None, cf=None): - info = self._get_info(descr.get_parent_descr(), optheap) - info.setfield(descr, struct, op, optheap, cf) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + info.setfield(fielddescr, struct, op, optheap=optheap, cf=cf) def is_null(self): return not bool(self._const.getref_base()) diff --git a/rpython/jit/metainterp/optimizeopt/shortpreamble.py b/rpython/jit/metainterp/optimizeopt/shortpreamble.py --- a/rpython/jit/metainterp/optimizeopt/shortpreamble.py +++ b/rpython/jit/metainterp/optimizeopt/shortpreamble.py @@ -81,7 +81,7 @@ assert index >= 0 cf = optheap.arrayitem_cache(descr, index) opinfo.setitem(self.getfield_op.getdescr(), index, self.res, - pop, cf, optheap=optheap) + pop, optheap, cf) def repr(self, memo): return "HeapOp(%s, %s)" % (self.res.repr(memo), diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -35,15 +35,11 @@ return True return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except lltype.DelayedPointer: - return True + def analyze_external_call(self, funcobj, seen=None): if getattr(funcobj, 'random_effects_on_gcobjs', False): return True - return graphanalyze.BoolGraphAnalyzer.analyze_external_call(self, op, - seen) + return graphanalyze.BoolGraphAnalyzer.analyze_external_call( + self, funcobj, seen) def analyze_simple_operation(self, op, graphinfo): if op.opname in ('malloc', 'malloc_varsize'): flags = op.args[1].value diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py --- a/rpython/rlib/rstacklet.py +++ b/rpython/rlib/rstacklet.py @@ -1,7 +1,7 @@ import sys from rpython.rlib import _rffi_stacklet as _c from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import fetch_translated_config from rpython.rtyper.lltypesystem import lltype, llmemory DEBUG = False @@ -10,8 +10,8 @@ class StackletThread(object): @jit.dont_look_inside - def __init__(self, config): - self._gcrootfinder = _getgcrootfinder(config, we_are_translated()) + def __init__(self, _argument_ignored_for_backward_compatibility=None): + self._gcrootfinder = _getgcrootfinder(fetch_translated_config()) self._thrd = _c.newthread() if not self._thrd: raise MemoryError @@ -67,11 +67,8 @@ # ____________________________________________________________ -def _getgcrootfinder(config, translated): - if translated: - assert config is not None, ("you have to pass a valid config, " - "e.g. from 'driver.config'") - elif '__pypy__' in sys.builtin_module_names: +def _getgcrootfinder(config): + if config is None and '__pypy__' in sys.builtin_module_names: import py py.test.skip("cannot run the stacklet tests on top of pypy: " "calling directly the C function stacklet_switch() " diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py --- a/rpython/rlib/test/test_rstacklet.py +++ b/rpython/rlib/test/test_rstacklet.py @@ -17,10 +17,9 @@ class Runner: STATUSMAX = 5000 - config = None def init(self, seed): - self.sthread = rstacklet.StackletThread(self.config) + self.sthread = rstacklet.StackletThread() self.random = rrandom.Random(seed) def done(self): @@ -301,12 +300,11 @@ config.translation.gcrootfinder = cls.gcrootfinder GCROOTFINDER = cls.gcrootfinder cls.config = config - cls.old_values = Runner.config, Runner.STATUSMAX - Runner.config = config + cls.old_status_max = Runner.STATUSMAX Runner.STATUSMAX = 25000 def teardown_class(cls): - Runner.config, Runner.STATUSMAX = cls.old_values + Runner.STATUSMAX = cls.old_status_max def test_demo1(self): t, cbuilder = self.compile(entry_point) diff --git a/rpython/translator/backendopt/canraise.py b/rpython/translator/backendopt/canraise.py --- a/rpython/translator/backendopt/canraise.py +++ b/rpython/translator/backendopt/canraise.py @@ -22,8 +22,7 @@ log.WARNING("Unknown operation: %s" % op.opname) return True - def analyze_external_call(self, op, seen=None): - fnobj = op.args[0].value._obj + def analyze_external_call(self, fnobj, seen=None): return getattr(fnobj, 'canraise', True) analyze_exceptblock = None # don't call this diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py --- a/rpython/translator/backendopt/gilanalysis.py +++ b/rpython/translator/backendopt/gilanalysis.py @@ -21,12 +21,8 @@ self, graph, seen) def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj - if getattr(funcobj, 'transactionsafe', False): - return False - else: - return False - + return False + def analyze_simple_operation(self, op, graphinfo): return False diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -55,11 +55,7 @@ def analyze_startblock(self, block, seen=None): return self.bottom_result() - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except DelayedPointer: - return self.bottom_result() + def analyze_external_call(self, funcobj, seen=None): result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper @@ -80,12 +76,22 @@ def analyze(self, op, seen=None, graphinfo=None): if op.opname == "direct_call": - graph = get_graph(op.args[0], self.translator) - if graph is None: - x = self.analyze_external_call(op, seen) + try: + funcobj = op.args[0].value._obj + except DelayedPointer: + return self.top_result() + if funcobj is None: + # We encountered a null pointer. Calling it will crash. + # However, the call could be on a dead path, so we return the + # bottom result here. + return self.bottom_result() + if getattr(funcobj, 'external', None) is not None: + x = self.analyze_external_call(funcobj, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x + graph = funcobj.graph + assert graph is not None x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) diff --git a/rpython/translator/backendopt/test/test_canraise.py b/rpython/translator/backendopt/test/test_canraise.py --- a/rpython/translator/backendopt/test/test_canraise.py +++ b/rpython/translator/backendopt/test/test_canraise.py @@ -204,8 +204,7 @@ result = ra.can_raise(fgraph.startblock.operations[0]) assert not result - z = lltype.functionptr(lltype.FuncType([lltype.Signed], lltype.Signed), - 'foobar') + z = llexternal('z', [lltype.Signed], lltype.Signed) def g(x): return z(x) t, ra = self.translate(g, [int]) diff --git a/rpython/translator/backendopt/test/test_graphanalyze.py b/rpython/translator/backendopt/test/test_graphanalyze.py --- a/rpython/translator/backendopt/test/test_graphanalyze.py +++ b/rpython/translator/backendopt/test/test_graphanalyze.py @@ -1,7 +1,7 @@ import random from rpython.tool.algo.unionfind import UnionFind -from rpython.translator.backendopt.graphanalyze import Dependency -from rpython.translator.backendopt.graphanalyze import DependencyTracker +from rpython.translator.backendopt.graphanalyze import (Dependency, + DependencyTracker, BoolGraphAnalyzer) class FakeGraphAnalyzer: @@ -49,3 +49,30 @@ method1 = rectrack(n, tracker) method2 = expected(n) assert method1 == method2 + + +def test_delayed_fnptr(): + from rpython.flowspace.model import SpaceOperation + from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator + from rpython.translator.translator import TranslationContext + t = TranslationContext() + t.buildannotator() + t.buildrtyper() + annhelper = MixLevelHelperAnnotator(t.rtyper) + def f(): + pass + c_f = annhelper.constfunc(f, [], None) + op = SpaceOperation('direct_call', [c_f], None) + analyzer = BoolGraphAnalyzer(t) + assert analyzer.analyze(op) + + +def test_null_fnptr(): + from rpython.flowspace.model import SpaceOperation, Constant + from rpython.rtyper.lltypesystem.lltype import Void, FuncType, nullptr + from rpython.translator.translator import TranslationContext + t = TranslationContext() + fnptr = nullptr(FuncType([], Void)) + op = SpaceOperation('direct_call', [Constant(fnptr)], None) + analyzer = BoolGraphAnalyzer(t) + assert not analyzer.analyze(op) From pypy.commits at gmail.com Tue Jan 12 23:42:21 2016 From: pypy.commits at gmail.com (sbauman) Date: Tue, 12 Jan 2016 20:42:21 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Directly consult descr for getfield operations Message-ID: <5695d5ad.8a58c20a.e6950.0e27@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81734:90797881ada9 Date: 2016-01-12 17:16 -0500 http://bitbucket.org/pypy/pypy/changeset/90797881ada9/ Log: Directly consult descr for getfield operations Only consider the opcode for is_always_pure on resoperations diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -515,13 +515,14 @@ return pendingfields def optimize_GETFIELD_GC_I(self, op): - if op.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None: + descr = op.getdescr() + if descr.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None: resbox = self.optimizer.constant_fold(op) self.optimizer.make_constant(op, resbox) return structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) + cf = self.field_cache(descr) + field = cf.getfield_from_cache(self, structinfo, descr) if field is not None: self.make_equal_to(op, field) return @@ -529,7 +530,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) + structinfo.setfield(descr, op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -75,7 +75,7 @@ dispatch_opt(self, op) def optimize_default(self, op): - canfold = op.is_always_pure() and not op.is_getfield() + canfold = op.is_always_pure() if op.is_ovf(): self.postponed_op = op return diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -249,7 +249,7 @@ def is_pure_getfield(opnum, descr): if opnum not in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R): return False - return descr is not None and descr.is_always_pure() != False + return descr is not None and descr.is_always_pure() class AbstractResOp(AbstractResOpOrInputArg): """The central ResOperation class, representing one operation.""" @@ -417,6 +417,8 @@ return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST def is_always_pure(self): + # Tells whether an operation is pure based solely on the opcode. + # Other operations (e.g. getfield ops) may be pure in some cases are well. return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): @@ -567,11 +569,6 @@ _descr = None - def is_always_pure(self): - if self.is_getfield(): - return self._descr.is_always_pure() != False - return AbstractResOp.is_always_pure(self) - def getdescr(self): return self._descr From pypy.commits at gmail.com Tue Jan 12 23:42:22 2016 From: pypy.commits at gmail.com (sbauman) Date: Tue, 12 Jan 2016 20:42:22 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Merge with default Message-ID: <5695d5ae.a453c20a.c8259.6541@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81735:2351ba72df1e Date: 2016-01-12 21:55 -0500 http://bitbucket.org/pypy/pypy/changeset/2351ba72df1e/ Log: Merge with default diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -331,11 +331,8 @@ class RandomEffectsAnalyzer(BoolGraphAnalyzer): def analyze_external_call(self, funcobj, seen=None): - try: - if funcobj.random_effects_on_gcobjs: - return True - except AttributeError: - return True # better safe than sorry + if funcobj.random_effects_on_gcobjs: + return True return super(RandomEffectsAnalyzer, self).analyze_external_call( funcobj, seen) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -36,7 +36,7 @@ return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) def analyze_external_call(self, funcobj, seen=None): - if getattr(funcobj, 'random_effects_on_gcobjs', False): + if funcobj.random_effects_on_gcobjs: return True return graphanalyze.BoolGraphAnalyzer.analyze_external_call( self, funcobj, seen) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -22,7 +22,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, - Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, + Ptr, ContainerType, FuncType, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive, getfunctionptr) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError from rpython.rtyper import rclass @@ -876,18 +876,6 @@ return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) - def genexternalcall(self, fnname, args_v, resulttype=None, **flags): - if isinstance(resulttype, Repr): - resulttype = resulttype.lowleveltype - argtypes = [v.concretetype for v in args_v] - FUNCTYPE = FuncType(argtypes, resulttype or Void) - f = functionptr(FUNCTYPE, fnname, **flags) - cf = inputconst(typeOf(f), f) - return self.genop('direct_call', [cf]+list(args_v), resulttype) - - def gencapicall(self, cfnname, args_v, resulttype=None, **flags): - return self.genexternalcall(cfnname, args_v, resulttype=resulttype, external="CPython", **flags) - def genconst(self, ll_value): return inputconst(typeOf(ll_value), ll_value) diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,5 +1,4 @@ from rpython.rtyper.lltypesystem.lltype import DelayedPointer -from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -90,8 +89,10 @@ if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x - graph = funcobj.graph - assert graph is not None + try: + graph = funcobj.graph + except AttributeError: + return self.top_result() x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -24,22 +24,13 @@ if not isinstance(f, lltype._ptr): return None try: - funcobj = f._getobj() + funcobj = f._obj except lltype.DelayedPointer: return None try: - callable = funcobj._callable - except (AttributeError, KeyError, AssertionError): - return None - try: return funcobj.graph except AttributeError: return None - try: - callable = funcobj._callable - return translator._graphof(callable) - except (AttributeError, KeyError, AssertionError): - return None def replace_exitswitch_by_constant(block, const): From pypy.commits at gmail.com Wed Jan 13 03:09:02 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 13 Jan 2016 00:09:02 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: renamed "reg" to "r" Message-ID: <5696061e.85e41c0a.1595e.3679@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81736:a0afdf455662 Date: 2016-01-13 09:08 +0100 http://bitbucket.org/pypy/pypy/changeset/a0afdf455662/ Log: renamed "reg" to "r" diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -3,7 +3,7 @@ import math from rpython.jit.backend.zarch import conditions as con from rpython.jit.backend.zarch import masks as msk -from rpython.jit.backend.zarch import registers as reg +from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch.assembler import AssemblerZARCH from rpython.jit.backend.zarch import locations as loc from rpython.jit.backend.zarch.test.support import run_asm @@ -37,11 +37,11 @@ def gen_func_prolog(mc): STACK_FRAME_SIZE = 40 - mc.STMG(r.r11, r.r15, l.addr(-STACK_FRAME_SIZE, r.SP)) - mc.AHI(r.SP, l.imm(-STACK_FRAME_SIZE)) + mc.STMG(r.r11, r.r15, loc.addr(-STACK_FRAME_SIZE, r.SP)) + mc.AHI(r.SP, loc.imm(-STACK_FRAME_SIZE)) def gen_func_epilog(mc): - mc.LMG(r.r11, r.r15, l.addr(0, r.SP)) + mc.LMG(r.r11, r.r15, loc.addr(0, r.SP)) mc.BCR_rr(0xf, r.r14.value) # jmp to def isclose(a,b, rel_tol=1e-9, abs_tol=0.0): @@ -59,7 +59,7 @@ def __enter__(self): self.lit_label.__enter__() - self.asm.mc.BRAS(reg.r13, loc.imm(0)) + self.asm.mc.BRAS(r.r13, loc.imm(0)) return self def __exit__(self, a, b, c): @@ -124,69 +124,69 @@ assert self.mc.LG_byte_count == 6 def test_load_small_int_to_reg(self): - self.a.mc.LGHI(reg.r2, loc.imm(123)) - self.a.jmpto(reg.r14) + self.a.mc.LGHI(r.r2, loc.imm(123)) + self.a.jmpto(r.r14) assert run_asm(self.a) == 123 def test_prolog_epilog(self): gen_func_prolog(self.a.mc) - self.a.mc.LGHI(reg.r2, loc.imm(123)) + self.a.mc.LGHI(r.r2, loc.imm(123)) gen_func_epilog(self.a.mc) assert run_asm(self.a) == 123 def test_simple_func(self): # enter - self.a.mc.STMG(reg.r11, reg.r15, loc.addr(-96, reg.SP)) - self.a.mc.AHI(reg.SP, loc.imm(-96)) + self.a.mc.STMG(r.r11, r.r15, loc.addr(-96, r.SP)) + self.a.mc.AHI(r.SP, loc.imm(-96)) # from the start of BRASL to end of jmpto there are 8+6 bytes - self.a.mc.BRASL(reg.r14, loc.imm(8+6)) - self.a.mc.LMG(reg.r11, reg.r15, loc.addr(0, reg.SP)) - self.a.jmpto(reg.r14) + self.a.mc.BRASL(r.r14, loc.imm(8+6)) + self.a.mc.LMG(r.r11, r.r15, loc.addr(0, r.SP)) + self.a.jmpto(r.r14) addr = self.a.mc.get_relative_pos() assert addr & 0x1 == 0 gen_func_prolog(self.a.mc) - self.a.mc.LGHI(reg.r2, loc.imm(321)) + self.a.mc.LGHI(r.r2, loc.imm(321)) gen_func_epilog(self.a.mc) assert run_asm(self.a) == 321 def test_simple_loop(self): - self.a.mc.LGHI(reg.r3, loc.imm(2**15-1)) - self.a.mc.LGHI(reg.r4, loc.imm(1)) + self.a.mc.LGHI(r.r3, loc.imm(2**15-1)) + self.a.mc.LGHI(r.r4, loc.imm(1)) L1 = self.a.mc.get_relative_pos() - self.a.mc.SGR(reg.r3, reg.r4) + self.a.mc.SGR(r.r3, r.r4) LJ = self.a.mc.get_relative_pos() self.a.mc.BRCL(con.GT, loc.imm(L1-LJ)) - self.a.mc.LGR(reg.r2, reg.r3) - self.a.jmpto(reg.r14) + self.a.mc.LGR(r.r2, r.r3) + self.a.jmpto(r.r14) assert run_asm(self.a) == 0 def test_and_imm(self): - self.a.mc.NIHH(reg.r2, loc.imm(0)) - self.a.mc.NIHL(reg.r2, loc.imm(0)) - self.a.mc.NILL(reg.r2, loc.imm(0)) - self.a.mc.NILH(reg.r2, loc.imm(0)) - self.a.jmpto(reg.r14) + self.a.mc.NIHH(r.r2, loc.imm(0)) + self.a.mc.NIHL(r.r2, loc.imm(0)) + self.a.mc.NILL(r.r2, loc.imm(0)) + self.a.mc.NILH(r.r2, loc.imm(0)) + self.a.jmpto(r.r14) assert run_asm(self.a) == 0 def test_or_imm(self): - self.a.mc.OIHH(reg.r2, loc.imm(0xffff)) - self.a.mc.OIHL(reg.r2, loc.imm(0xffff)) - self.a.mc.OILL(reg.r2, loc.imm(0xffff)) - self.a.mc.OILH(reg.r2, loc.imm(0xffff)) - self.a.jmpto(reg.r14) + self.a.mc.OIHH(r.r2, loc.imm(0xffff)) + self.a.mc.OIHL(r.r2, loc.imm(0xffff)) + self.a.mc.OILL(r.r2, loc.imm(0xffff)) + self.a.mc.OILH(r.r2, loc.imm(0xffff)) + self.a.jmpto(r.r14) assert run_asm(self.a) == -1 def test_xor(self): - self.a.mc.XGR(reg.r2, reg.r2) - self.a.jmpto(reg.r14) + self.a.mc.XGR(r.r2, r.r2) + self.a.jmpto(r.r14) assert run_asm(self.a) == 0 def test_literal_pool(self): gen_func_prolog(self.a.mc) - self.a.mc.BRAS(reg.r13, loc.imm(8 + self.mc.BRAS_byte_count)) + self.a.mc.BRAS(r.r13, loc.imm(8 + self.mc.BRAS_byte_count)) self.a.mc.write('\x08\x07\x06\x05\x04\x03\x02\x01') - self.a.mc.LG(reg.r2, loc.addr(0, reg.r13)) + self.a.mc.LG(r.r2, loc.addr(0, r.r13)) gen_func_epilog(self.a.mc) assert run_asm(self.a) == 0x0807060504030201 @@ -216,57 +216,57 @@ self.mc.BRAS(reg, loc.imm(val)) def test_stmg(self): - self.mc.LGR(reg.r2, reg.r15) - self.a.jmpto(reg.r14) + self.mc.LGR(r.r2, r.r15) + self.a.jmpto(r.r14) print hex(run_asm(self.a)) def test_recursion(self): with ActivationRecordCtx(self): with self.label('lit'): - self.mc.BRAS(reg.r13, loc.imm(0)) + self.mc.BRAS(r.r13, loc.imm(0)) self.mc.write('\x00\x00\x00\x00\x00\x00\x00\x00') self.jump_here(self.mc.BRAS, 'lit') # recurse X times - self.mc.XGR(reg.r2, reg.r2) - self.mc.LGHI(reg.r9, loc.imm(15)) + self.mc.XGR(r.r2, r.r2) + self.mc.LGHI(r.r9, loc.imm(15)) with self.label('L1'): - self.mc.BRAS(reg.r14, loc.imm(0)) + self.mc.BRAS(r.r14, loc.imm(0)) with ActivationRecordCtx(self, 'rec'): - self.mc.AGR(reg.r2, reg.r9) - self.mc.AHI(reg.r9, loc.imm(-1)) + self.mc.AGR(r.r2, r.r9) + self.mc.AHI(r.r9, loc.imm(-1)) # if not entered recursion, return from activation record # implicitly generated here by with statement self.mc.BRC(con.GT, loc.imm(self.pos('rec') - self.cur())) self.jump_here(self.mc.BRAS, 'L1') # call rec... recursivly - self.jump_to(reg.r14, 'rec') - self.a.jmpto(reg.r14) + self.jump_to(r.r14, 'rec') + self.a.jmpto(r.r14) assert run_asm(self.a) == 120 def test_printf(self): with ActivationRecordCtx(self): with self.label('lit'): - self.mc.BRAS(reg.r13, loc.imm(0)) + self.mc.BRAS(r.r13, loc.imm(0)) for c in "hello syscall\n": self.mc.writechar(c) self.jump_here(self.mc.BRAS, 'lit') - self.mc.LGHI(reg.r2, loc.imm(1)) # stderr - self.mc.LA(reg.r3, loc.addr(0, reg.r13)) # char* - self.mc.LGHI(reg.r4, loc.imm(14)) # length + self.mc.LGHI(r.r2, loc.imm(1)) # stderr + self.mc.LA(r.r3, loc.addr(0, r.r13)) # char* + self.mc.LGHI(r.r4, loc.imm(14)) # length # write sys call self.mc.SVC(loc.imm(4)) - self.a.jmpto(reg.r14) + self.a.jmpto(r.r14) assert run_asm(self.a) == 14 def test_float(self): with ActivationRecordCtx(self): with self.label('lit'): - self.mc.BRAS(reg.r13, loc.imm(0)) + self.mc.BRAS(r.r13, loc.imm(0)) self.mc.write(BFL(-15.0)) self.jump_here(self.mc.BRAS, 'lit') - self.mc.LD(reg.f0, loc.addr(0, reg.r13)) - self.mc.CGDBR(reg.r2, msk.RND_CURMODE, reg.f0) - self.a.jmpto(reg.r14) + self.mc.LD(r.f0, loc.addr(0, r.r13)) + self.mc.CGDBR(r.r2, msk.RND_CURMODE, r.f0) + self.a.jmpto(r.r14) assert run_asm(self.a) == -15 @py.test.mark.parametrize("v1,v2,res", [ @@ -281,17 +281,17 @@ with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 16) as mem: with ActivationRecordCtx(self): with self.label('lit'): - self.mc.BRAS(reg.r13, loc.imm(0)) + self.mc.BRAS(r.r13, loc.imm(0)) self.mc.write(BFL(v1)) self.mc.write(BFL(v2)) self.mc.write(ADDR(mem)) self.jump_here(self.mc.BRAS, 'lit') - self.mc.LD(reg.f0, loc.addr(0, reg.r13)) - self.mc.LD(reg.f1, loc.addr(8, reg.r13)) - self.mc.ADBR(reg.f0, reg.f1) - self.mc.LG(reg.r11, loc.addr(16, reg.r13)) - self.mc.STD(reg.f0, loc.addr(0, reg.r11)) - self.a.jmpto(reg.r14) + self.mc.LD(r.f0, loc.addr(0, r.r13)) + self.mc.LD(r.f1, loc.addr(8, r.r13)) + self.mc.ADBR(r.f0, r.f1) + self.mc.LG(r.r11, loc.addr(16, r.r13)) + self.mc.STD(r.f0, loc.addr(0, r.r11)) + self.a.jmpto(r.r14) run_asm(self.a) assert isclose(mem[0],res) @@ -310,11 +310,11 @@ pool.float(v1) pool.float(v2) pool.addr(mem) - self.mc.LD(reg.f0, loc.addr(0, reg.r13)) - self.mc.MDB(reg.f0, loc.addr(8, reg.r13)) - self.mc.LG(reg.r11, loc.addr(16, reg.r13)) - self.mc.STD(reg.f0, loc.addr(0, reg.r11)) - self.a.jmpto(reg.r14) + self.mc.LD(r.f0, loc.addr(0, r.r13)) + self.mc.MDB(r.f0, loc.addr(8, r.r13)) + self.mc.LG(r.r11, loc.addr(16, r.r13)) + self.mc.STD(r.f0, loc.addr(0, r.r11)) + self.a.jmpto(r.r14) run_asm(self.a) assert isclose(mem[0],res) @@ -323,9 +323,9 @@ with ActivationRecordCtx(self): with LiteralPoolCtx(self) as pool: pool.addr(mem) - self.mc.LZDR(reg.f0) - self.mc.LG(reg.r11, loc.addr(0, reg.r13)) - self.mc.STD(reg.f0, loc.addr(0, reg.r11)) + self.mc.LZDR(r.f0) + self.mc.LG(r.r11, loc.addr(0, r.r13)) + self.mc.STD(r.f0, loc.addr(0, r.r11)) run_asm(self.a) assert isclose(mem[0], 0.0) @@ -335,11 +335,11 @@ with LiteralPoolCtx(self) as pool: pool.single_float(6.66) pool.addr(mem) - self.mc.LEY(reg.f1, loc.addr(0, reg.r13)) + self.mc.LEY(r.f1, loc.addr(0, r.r13)) ## cast short to long! - self.mc.LDEBR(reg.f0, reg.f1) - self.mc.LG(reg.r11, loc.addr(4, reg.r13)) - self.mc.STD(reg.f0, loc.addr(0, reg.r11)) + self.mc.LDEBR(r.f0, r.f1) + self.mc.LG(r.r11, loc.addr(4, r.r13)) + self.mc.STD(r.f0, loc.addr(0, r.r11)) run_asm(self.a) assert isclose(mem[0], 6.66, abs_tol=0.05) @@ -349,11 +349,11 @@ with LiteralPoolCtx(self) as pool: pool.int64(12345) pool.addr(mem) - self.mc.LG(reg.r12, loc.addr(0, reg.r13)) + self.mc.LG(r.r12, loc.addr(0, r.r13)) # cast int to float! - self.mc.CDGBR(reg.f0, reg.r12) - self.mc.LG(reg.r11, loc.addr(8, reg.r13)) - self.mc.STD(reg.f0, loc.addr(0, reg.r11)) + self.mc.CDGBR(r.f0, r.r12) + self.mc.LG(r.r11, loc.addr(8, r.r13)) + self.mc.STD(r.f0, loc.addr(0, r.r11)) run_asm(self.a) assert isclose(mem[0], 12345.0) @@ -362,13 +362,13 @@ with LiteralPoolCtx(self) as pool: pool.float(1.0) pool.float(2.0) - self.mc.LD(reg.f0, loc.addr(0, reg.r13)) - self.mc.LD(reg.f1, loc.addr(8, reg.r13)) - self.mc.CDBR(reg.f0, reg.f1) - self.mc.LGHI(reg.r2, loc.imm(0)) - self.mc.BCR(con.EQ, reg.r14) # must not branch - self.mc.LGHI(reg.r2, loc.imm(1)) - self.a.jmpto(reg.r14) + self.mc.LD(r.f0, loc.addr(0, r.r13)) + self.mc.LD(r.f1, loc.addr(8, r.r13)) + self.mc.CDBR(r.f0, r.f1) + self.mc.LGHI(r.r2, loc.imm(0)) + self.mc.BCR(con.EQ, r.r14) # must not branch + self.mc.LGHI(r.r2, loc.imm(1)) + self.a.jmpto(r.r14) assert run_asm(self.a) == 1 def pushpop_jitframe(self, registers): @@ -391,8 +391,6 @@ self.mc.LMG = LMG self.mc.LG = LG - r = reg - # 2-6 self.pushpop_jitframe([r.r2, r.r3, r.r4, r.r5, r.r6, r.r8, r.r10]) assert stored == [(r.r2, r.r6), (r.r8,), (r.r10,)] From pypy.commits at gmail.com Wed Jan 13 04:24:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 13 Jan 2016 01:24:11 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added to more tests & bug fix: do not use SRLG (logical right shift) or any other shift with parameters R_1 and R_3 that designate the same register Message-ID: <569617bb.4e0e1c0a.81dbc.5861@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81737:d411a9856766 Date: 2016-01-13 10:23 +0100 http://bitbucket.org/pypy/pypy/changeset/d411a9856766/ Log: added to more tests & bug fix: do not use SRLG (logical right shift) or any other shift with parameters R_1 and R_3 that designate the same register diff --git a/rpython/jit/backend/zarch/helper/assembler.py b/rpython/jit/backend/zarch/helper/assembler.py --- a/rpython/jit/backend/zarch/helper/assembler.py +++ b/rpython/jit/backend/zarch/helper/assembler.py @@ -27,8 +27,10 @@ def gen_emit_shift(func): def f(self, op, arglocs, regalloc): - l0, l1 = arglocs - getattr(self.mc, func)(l0, l0, l1) + lr, l0, l1 = arglocs + assert lr is not l0 + getattr(self.mc, func)(lr, l0, l1) + f.name = 'emit_shift_' + func return f def gen_emit_rr_or_rpool(rr_func, rp_func): diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -115,9 +115,9 @@ tmp = self.rm.ensure_reg(a1, force_in_reg=True) l1 = addr(0, tmp) l0 = self.ensure_reg(a0, force_in_reg=True) - self.force_result_in_reg(op, a0) + lr = self.force_allocate_reg(op) self.free_op_vars() - return [l0, l1] + return [lr, l0, l1] def generate_cmp_op(signed=True): def prepare_cmp_op(self, op): diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -177,6 +177,16 @@ self.a.jmpto(r.r14) assert run_asm(self.a) == -1 + def test_uint_rshift(self): + self.a.mc.XGR(r.r4, r.r4) + self.a.mc.LGFI(r.r5, loc.imm(63)) + self.a.mc.NGR(r.r4, r.r5) + self.a.mc.LGFI(r.r3, loc.imm(18)) + self.a.mc.LGFI(r.r2, loc.imm(0xffffffff)) + self.a.mc.SRLG(r.r2, r.r3, loc.addr(18)) + self.a.jmpto(r.r14) + assert run_asm(self.a) == 0 + def test_xor(self): self.a.mc.XGR(r.r2, r.r2) self.a.jmpto(r.r14) diff --git a/rpython/jit/backend/zarch/test/test_int.py b/rpython/jit/backend/zarch/test/test_int.py --- a/rpython/jit/backend/zarch/test/test_int.py +++ b/rpython/jit/backend/zarch/test/test_int.py @@ -16,6 +16,25 @@ cpu = CPU_S390_64(rtyper=None, stats=FakeStats()) cpu.setup_once() + def test_uint_rshift(self): + code = """ + [i1] + i11 = int_and(i1, 63) + i10 = uint_rshift(18, i11) + i1402 = int_is_true(i10) + guard_false(i1402, descr=faildescr) [] # must NEVER exit with i1 == 0 + finish(i1402, descr=finishdescr) + """ + finishdescr = BasicFinalDescr(1) + faildescr = BasicFailDescr(2) + loop = parse(code, namespace={'faildescr': faildescr, + 'finishdescr': finishdescr}) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken, 19) + fail = self.cpu.get_latest_descr(deadframe) + assert fail == finishdescr # ensures that guard is not taken! + def test_double_evenodd_pair(self): code = """ [i0] From pypy.commits at gmail.com Wed Jan 13 05:45:02 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Jan 2016 02:45:02 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: refactor details, start writing docs Message-ID: <56962aae.9a6f1c0a.bb518.7b07@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2573:e982f7a7b2f4 Date: 2016-01-13 11:44 +0100 http://bitbucket.org/cffi/cffi/changeset/e982f7a7b2f4/ Log: refactor details, start writing docs diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -620,26 +620,23 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, ext=None): - """Values recognized for the ext parameter: + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll). - - 'capi': use distutils' default to build CPython C API extensions - - 'system': use the system's default for dynamic libraries (.so/.dll) - - '.FOO': exactly .FOO - - The default is 'capi' when building a non-embedded C API extension, - and 'system' when building an embedded library. + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") - if ext not in (None, 'capi', 'system') and '.' not in ext: - raise ValueError("bad value for 'ext' argument: %r" % (ext,)) module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - target_extention=ext, - source_extension=source_extension, + target=target, source_extension=source_extension, compiler_verbose=verbose, **kwds) def init_once(self, func, tag): diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -21,14 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0, target_extention=None, +def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: outputfilename = _build(tmpdir, ext, compiler_verbose, - target_extention, embedding) + target_extension, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -62,7 +62,7 @@ MSVCCompiler._remove_visual_c_ref = \ MSVCCompiler._remove_visual_c_ref_CFFI_BAK -def _build(tmpdir, ext, compiler_verbose=0, target_extention=None, +def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution @@ -82,21 +82,9 @@ old_SO = _save_val('SO') old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: - if target_extention is None: - if embedding: - target_extention = 'system' - else: - target_extention = 'capi' - if target_extention == 'capi': - pass # keep the values already in 'SO' and 'EXT_SUFFIX' - else: - if target_extention == 'system': - if sys.platform == 'win32': - target_extention = '.dll' - else: - target_extention = '.so' - _restore_val('SO', target_extention) - _restore_val('EXT_SUFFIX', target_extention) + if target_extension is not None: + _restore_val('SO', target_extension) + _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') cmd_obj = dist.get_command_obj('build_ext') diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1359,7 +1359,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, target_extention=None, **kwds): + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1376,14 +1376,39 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + if target == '*': + target_module_name = module_name + target_extension = None # use default + else: + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + # split along the first '.' (not the last one, otherwise the + # preceeding dots are interpreted as splitting package names) + index = target.find('.') + if index < 0: + raise ValueError("target argument %r should be a file name " + "containing a '.'" % (target,)) + target_module_name = target[:index] + target_extension = target[index:] + # + ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: cwd = os.getcwd() try: os.chdir(tmpdir) outputfilename = ffiplatform.compile('.', ext, compiler_verbose, - target_extention, + target_extension, embedding=embedding) finally: os.chdir(cwd) diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst new file mode 100644 --- /dev/null +++ b/doc/source/embedding.rst @@ -0,0 +1,52 @@ +================================ +Using CFFI for embedding +================================ + +.. contents:: + +From *version 1.5,* you can use CFFI to generate a ``.so/.dll`` which +is no longer usable only from Python, but which exports the API of +your choice to any C application that wants to link with this +``.so/.dll``. + + +Usage +----- + +See the `paragraph in the overview page`__ for a quick introduction. +We decompose and explain every step below. We will call *DLL* the +dynamically-loaded library that we are producing; it is a file with +the (default) extension ``.dll`` on Windows or ``.so`` on other +platforms. As usual, it is produced by generating some intermediate +``.c`` code and then calling the regular platform-specific C compiler. + +.. __: overview.html#embedding + +* **ffi.embedding_api(source):** parses the given C source, which + declares functions that you want to be exported by the DLL. It can + also declare types, constants and global variables that are part of + the C-level API of your DLL. + + The functions are automatically produced in the ``.c`` file: they + contain code that initializes the Python interpreter the first time + any of them is called, followed by code to call the associated + Python function (see next point). + + The global variables, on the other hand, are not automatically + produced; you have to write their definition explicitly in + ``ffi.set_source()``, as regular C code. (The C code, as usual, can + include an initializer, or define the missing length for ``int + glob[];``, for example). + +* **ffi.embedding_init_code(python_code):** this stores the given + Python source code inside the DLL. This code will be executed at + runtime when the DLL is first initialized, just after Python itself + is initialized. This Python interpreter runs with the DLL ready + to be imported as a xxxxxxxxxxxxxx + + + It should typically attach a Python function to each + of the C functions declared in ``embedding_api()``. It does this + by importing the ``ffi`` object from the + + with ``@ffi.def_extern()``. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -18,6 +18,7 @@ overview using cdef + embedding Goals diff --git a/doc/source/overview.rst b/doc/source/overview.rst --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -287,6 +287,54 @@ distributed in precompiled form like any other extension module.* +.. _embedding: + +Embedding +--------- + +*New in version 1.5.* + +CFFI can also be used for embedding__, by creating a standard +dynamically-linked library (``.dll`` under Windows, ``.so`` +elsewhere). This DLL can then be used from any C application. + +.. code-block:: python + + import cffi + ffi = cffi.FFI() + + ffi.embedding_api(""" + int do_stuff(int, int); + """) + + ffi.set_source("mystuff", "") + + ffi.embedding_init_code(""" + from mystuff import ffi + + @ffi.def_extern() + def do_stuff(x, y): + print("adding %d and %d" % (x, y)) + return x + y + """) + + ffi.compile(verbose=True) + +This simple example creates ``mystuff.dll`` or ``mystuff.so`` as a DLL +with a single exported function, ``do_stuff()``. You execute the +script above once, with the interpreter you want to have internally +used; it can be CPython 2.x or 3.x or PyPy. This DLL can then be used +"as usual" from an application; the application doesn't need to know +that it is talking with a library made with Python and CFFI. At +runtime, when the application calls ``int do_stuff(int, int)``, the +Python interpreter is automatically initialized and ``def do_stuff(x, +y):`` gets called. `See the details in the documentation about +embedding.`__ + +.. __: embedding.html +.. __: embedding.html + + What actually happened? ----------------------- diff --git a/testing/cffi1/test_zdist.py b/testing/cffi1/test_zdist.py --- a/testing/cffi1/test_zdist.py +++ b/testing/cffi1/test_zdist.py @@ -59,11 +59,16 @@ if (name.endswith('.so') or name.endswith('.pyd') or name.endswith('.dylib')): found_so = os.path.join(curdir, name) - # foo.cpython-34m.so => foo - name = name.split('.')[0] - # foo_d.so => foo (Python 2 debug builds) + # foo.so => foo + parts = name.split('.') + del parts[-1] + if len(parts) > 1 and parts[-1] != 'bar': + # foo.cpython-34m.so => foo, but foo.bar.so => foo.bar + del parts[-1] + name = '.'.join(parts) + # foo_d => foo (Python 2 debug builds) if name.endswith('_d') and hasattr(sys, 'gettotalrefcount'): - name = name.rsplit('_', 1)[0] + name = name[:-2] name += '.SO' if name.startswith('pycparser') and name.endswith('.egg'): continue # no clue why this shows up sometimes and not others @@ -208,6 +213,58 @@ 'Release': '?'}}) @chdir_to_tmp + def test_api_compile_explicit_target_1(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.*") + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_2(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target=os.path.join("mod_name_in_package", "foo.bar.*")) + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_3(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.baz") + if sys.platform != 'win32': + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + sofile = os.path.join(str(self.udir), 'foo.bar.baz') + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp def test_api_distutils_extension_1(self): ffi = cffi.FFI() ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") From pypy.commits at gmail.com Wed Jan 13 06:35:32 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Jan 2016 03:35:32 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: more doc Message-ID: <56963684.03231c0a.9814b.ffffbd31@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2574:3155fc3812ed Date: 2016-01-13 12:35 +0100 http://bitbucket.org/cffi/cffi/changeset/3155fc3812ed/ Log: more doc diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -5,18 +5,17 @@ .. contents:: From *version 1.5,* you can use CFFI to generate a ``.so/.dll`` which -is no longer usable only from Python, but which exports the API of -your choice to any C application that wants to link with this -``.so/.dll``. +exports the API of your choice to any C application that wants to link +with this ``.so/.dll``. Usage ----- See the `paragraph in the overview page`__ for a quick introduction. -We decompose and explain every step below. We will call *DLL* the -dynamically-loaded library that we are producing; it is a file with -the (default) extension ``.dll`` on Windows or ``.so`` on other +In this section, we explain every step in more details. We call *DLL* +the dynamically-loaded library that we are producing; it is a file +with the (default) extension ``.dll`` on Windows or ``.so`` on other platforms. As usual, it is produced by generating some intermediate ``.c`` code and then calling the regular platform-specific C compiler. @@ -27,26 +26,54 @@ also declare types, constants and global variables that are part of the C-level API of your DLL. - The functions are automatically produced in the ``.c`` file: they + The functions are automatically defined in the ``.c`` file: they contain code that initializes the Python interpreter the first time - any of them is called, followed by code to call the associated - Python function (see next point). + any of them is called, followed by code to call the attached + Python function (with ``@ffi.def_extern()``, see next point). The global variables, on the other hand, are not automatically produced; you have to write their definition explicitly in - ``ffi.set_source()``, as regular C code. (The C code, as usual, can - include an initializer, or define the missing length for ``int - glob[];``, for example). + ``ffi.set_source()``, as regular C code. -* **ffi.embedding_init_code(python_code):** this stores the given - Python source code inside the DLL. This code will be executed at - runtime when the DLL is first initialized, just after Python itself - is initialized. This Python interpreter runs with the DLL ready - to be imported as a xxxxxxxxxxxxxx +* **ffi.embedding_init_code(python_code):** this gives + initialization-time Python source code. This code is copied inside + the DLL. At runtime, the code is executed when the DLL is first + initialized, just after Python itself is initialized. This newly + initialized Python interpreter has got the DLL ready to be imported, + typically with a line like ``from module_name import ffi, lib`` + (where ``module_name`` is the name given in first argument to + ``ffi.set_source()``). + + This Python code can import other modules or packages as usual (it + might need to set up ``sys.path`` first). You should use the + decorator ``@ffi.def_extern()`` to attach a Python function to each + of the C functions declared within ``ffi.embedding_api()``. (If you + don't, calling the C function results for now in a message printed + to stderr and a zero return value.) + +* **ffi.set_source(module_name, c_code):** set the name of the module + from Python's point of view. It also gives more C code which will + be included in the generated C code. In simple examples it can be + an empty string. It is where you would ``#include`` some other + files, define global variables, and so on. The macro + ``CFFI_DLLEXPORT`` is available to this C code: it expands to the + platform-specific way of saying "the following declaration should be + exported from the DLL". For example, you would put "``int + my_glob;``" in ``ffi.embedding_api()`` and "``CFFI_DLLEXPORT int + my_glob = 42;``" in ``ffi.set_source()``. +* **ffi.compile([target=...] [, verbose=True]):** make the C code and + compile it. By default, it produces a file called + ``module_name.dll`` or ``module_name.so``, but the default can be + changed with the optional ``target`` keyword argument. You can use + ``target="foo.*"`` with a literal ``*`` to ask for a file called + ``foo.dll`` on Windows or ``foo.so`` elsewhere. (The ``target`` + file name can contain characters not usually allowed in Python + module names.) - It should typically attach a Python function to each - of the C functions declared in ``embedding_api()``. It does this - by importing the ``ffi`` object from the - - with ``@ffi.def_extern()``. + For more complicated cases, you can call instead + ``ffi.emit_c_code("foo.c")`` and compile the resulting ``foo.c`` + file using other means. CFFI's compilation logic is based on the + standard library ``distutils`` package, which is really developed + and tested for the purpose of making CPython extension modules, not + other DLLs. diff --git a/doc/source/overview.rst b/doc/source/overview.rst --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -294,9 +294,9 @@ *New in version 1.5.* -CFFI can also be used for embedding__, by creating a standard -dynamically-linked library (``.dll`` under Windows, ``.so`` -elsewhere). This DLL can then be used from any C application. +CFFI can be used for embedding__: creating a standard +dynamically-linked library (``.dll`` under Windows, ``.so`` elsewhere) +which can be used from a C application. .. code-block:: python @@ -307,10 +307,10 @@ int do_stuff(int, int); """) - ffi.set_source("mystuff", "") + ffi.set_source("my_plugin", "") ffi.embedding_init_code(""" - from mystuff import ffi + from my_plugin import ffi @ffi.def_extern() def do_stuff(x, y): @@ -318,18 +318,18 @@ return x + y """) - ffi.compile(verbose=True) + ffi.compile(target="plugin-1.5.*", verbose=True) -This simple example creates ``mystuff.dll`` or ``mystuff.so`` as a DLL -with a single exported function, ``do_stuff()``. You execute the -script above once, with the interpreter you want to have internally -used; it can be CPython 2.x or 3.x or PyPy. This DLL can then be used -"as usual" from an application; the application doesn't need to know -that it is talking with a library made with Python and CFFI. At -runtime, when the application calls ``int do_stuff(int, int)``, the -Python interpreter is automatically initialized and ``def do_stuff(x, -y):`` gets called. `See the details in the documentation about -embedding.`__ +This simple example creates ``plugin-1.5.dll`` or ``plugin-1.5.so`` as +a DLL with a single exported function, ``do_stuff()``. You execute +the script above once, with the interpreter you want to have +internally used; it can be CPython 2.x or 3.x or PyPy. This DLL can +then be used "as usual" from an application; the application doesn't +need to know that it is talking with a library made with Python and +CFFI. At runtime, when the application calls ``int do_stuff(int, +int)``, the Python interpreter is automatically initialized and ``def +do_stuff(x, y):`` gets called. `See the details in the documentation +about embedding.`__ .. __: embedding.html .. __: embedding.html From pypy.commits at gmail.com Wed Jan 13 07:12:21 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 13 Jan 2016 04:12:21 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: mul overflow, if right is negative up to now the wrong branch of mul overflow has been taken. resulted in abs(v) = abs(x) * neg(z), which is wrong Message-ID: <56963f25.42cbc20a.63b1.6f92@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81738:ac53e51faf32 Date: 2016-01-13 13:10 +0100 http://bitbucket.org/pypy/pypy/changeset/ac53e51faf32/ Log: mul overflow, if right is negative up to now the wrong branch of mul overflow has been taken. resulted in abs(v) = abs(x) * neg(z), which is wrong diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -21,7 +21,7 @@ def prepare_int_add(self, op): a0 = op.getarg(0) a1 = op.getarg(1) - if check_imm32(a0): + if a0.is_constant(): a0, a1 = a1, a0 l0 = self.ensure_reg(a0) if check_imm32(a1): @@ -35,7 +35,7 @@ def prepare_int_mul(self, op): a0 = op.getarg(0) a1 = op.getarg(1) - if check_imm32(a0): + if a0.is_constant(): a0, a1 = a1, a0 l0 = self.ensure_reg(a0) if check_imm32(a1): @@ -49,7 +49,7 @@ def prepare_int_mul_ovf(self, op): a0 = op.getarg(0) a1 = op.getarg(1) - if check_imm32(a0): + if a0.is_constant(): a0, a1 = a1, a0 lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=False) if check_imm32(a1): @@ -96,7 +96,7 @@ def prepare_int_logic(self, op): a0 = op.getarg(0) a1 = op.getarg(1) - if isinstance(a0, ConstInt): + if a0.is_constant(): a0, a1 = a1, a0 l0 = self.ensure_reg(a0) l1 = self.ensure_reg(a1) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -72,8 +72,9 @@ bc_set_overflow = mc.OIHL_byte_count + mc.SPM_byte_count # check left neg - mc.CGIJ(lq, l.imm(0), c.LT, l.imm(mc.CGIJ_byte_count*2)) - mc.CGIJ(l1, l.imm(0), c.GE, l.imm(mc.CGIJ_byte_count*2 + bc_one_signed)) + mc.CGIJ(lq, l.imm(0), c.LT, l.imm(mc.CGIJ_byte_count*2+mc.BRC_byte_count)) + mc.CGIJ(l1, l.imm(0), c.GE, l.imm(mc.CGIJ_byte_count*2+mc.BRC_byte_count + bc_one_signed)) + mc.BRC(c.ANY, l.imm(mc.BRC_byte_count + mc.CGIJ_byte_count)) # right is negative mc.CGIJ(l1, l.imm(0), c.LT, l.imm(mc.CGIJ_byte_count + bc_one_signed)) # jump if both are negative # left or right is negative mc.LPGR(lq, lq) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -85,10 +85,9 @@ self.offset_map[arg] = self.size self.reserve_literal(8) - def get_descr_offset(self, descr): - return self.offset_map[descr] - def get_offset(self, box): + if not we_are_translated(): + assert self.offset_map[box] >= 0 return self.offset_map[box] def reserve_literal(self, size): diff --git a/rpython/jit/backend/zarch/test/test_int.py b/rpython/jit/backend/zarch/test/test_int.py --- a/rpython/jit/backend/zarch/test/test_int.py +++ b/rpython/jit/backend/zarch/test/test_int.py @@ -116,3 +116,21 @@ deadframe = self.cpu.execute_token(looptoken, v1) fail = self.cpu.get_latest_descr(deadframe) assert self.cpu.get_int_value(deadframe, 0) == result + + @py.test.mark.parametrize('v1,v2', [(-189,2),(189,-2)]) + def test_int_mul_no_overflow_var_var(self, v1, v2): + try: + result = v1*v2 + except OverflowError: + py.test.skip("this test is not made to check the overflow!") + code = """ + [i0,i2] + i1 = int_mul_ovf(i0,i2) + finish(i1, descr=faildescr) + """.format() + loop = parse(code, namespace={"faildescr": BasicFinalDescr(1)}) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken, v1, v2) + fail = self.cpu.get_latest_descr(deadframe) + assert self.cpu.get_int_value(deadframe, 0) == result From pypy.commits at gmail.com Wed Jan 13 07:14:55 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 13 Jan 2016 04:14:55 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: renamed method on literal pool object, changed call sites to use the right function name Message-ID: <56963fbf.41dfc20a.8feba.7324@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81739:9eb93197f4a6 Date: 2016-01-13 13:13 +0100 http://bitbucket.org/pypy/pypy/changeset/9eb93197f4a6/ Log: renamed method on literal pool object, changed call sites to use the right function name diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1179,7 +1179,7 @@ self.mc.b_offset(descr._ll_loop_code + self.mc.LARL_byte_count) else: # restore the pool address - offset = self.pool.get_descr_offset(descr) + \ + offset = self.pool.get_offset(descr) + \ JUMPABS_TARGET_ADDR__POOL_OFFSET offset_pool = offset + JUMPABS_POOL_ADDR_POOL_OFFSET self.mc.LG(r.SCRATCH, l.pool(offset)) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -134,7 +134,7 @@ def ensure_reg(self, box, force_in_reg, selected_reg=None): if isinstance(box, Const): - offset = self.assembler.pool.get_descr_offset(box) + offset = self.assembler.pool.get_offset(box) poolloc = l.pool(offset) if force_in_reg: if selected_reg is None: From pypy.commits at gmail.com Wed Jan 13 07:30:34 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Jan 2016 04:30:34 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: more doc Message-ID: <5696436a.247bc20a.2f1f.76e0@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2575:084db6ef07fc Date: 2016-01-13 13:30 +0100 http://bitbucket.org/cffi/cffi/changeset/084db6ef07fc/ Log: more doc diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -138,6 +138,8 @@ for ``lib.__class__`` before version 1.4. +.. _cdef: + ffi.cdef(): declaring types and functions ----------------------------------------- diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -77,3 +77,93 @@ standard library ``distutils`` package, which is really developed and tested for the purpose of making CPython extension modules, not other DLLs. + + +More reading +------------ + +(XXX should point to a few places in the rest of the CFFI docs where +people starting from embedding would like to go next) + +XXX copy the content of ffi.embedding() to a .h + + +Embedding and Extending +----------------------- + +The embedding mode is not incompatible with the non-embedding mode of +CFFI. The Python code can import not only ``ffi`` but also ``lib`` +from the module you define. This ``lib`` contains all the C symbols +that are available to Python. This includes all functions and global +variables declared in ``ffi.embedding_api()`` (it is how you should +read/write the global variables from Python). + +But you can use ``ffi.cdef()`` *in addition to* +``ffi.embedding_api()`` to exchange more C functions and global +variables between C and Python, without also making them exports of +the DLL. See `here for more about cdef.`__ + +.. __: cdef.html#cdef + +``ffi.cdef()`` is used to access functions and variables that you can, +and should, define as ``static`` in ``set_source()``. On the other +hand, the C functions declared with ``ffi.embedding_api()`` work +similarly to ``extern "Python"`` functions from ``ffi.cdef()``. +See `here for more about extern "Python".`__ See `here for details +about @ffi.def_extern().`__ + +.. __: using.html#extern-python +.. __: using.html#extern-python-ref + +In some cases, you want to write a DLL-exported C function in C +directly, maybe to handle some cases before calling Python functions. +To do that, you must *not* write the function's signature in +``ffi.embedding_api()``. You must only write the custom function +definition in ``ffi.set_source()``, and prefix it with the macro +CFFI_DLLEXPORT: + +.. code-block:: c + + CFFI_DLLEXPORT int myfunc(int a, int b) + { + /* implementation here */ + } + +This function can, if it wants, invoke Python functions using the +general mechanism of "callbacks" (technically a call from C to Python, +although in this case it is not calling anything back): you need a +``ffi.cdef()`` with "``extern "Python" int mycb(int);``", and then you +can write this in ``ffi.set_source()``: + +.. code-block:: c + + static int mycb(int); /* the callback: forward declaration, to make + it accessible from the C code that follows */ + + CFFI_DLLEXPORT int myfunc(int a, int b) + { + int product = a * b; /* some custom C code */ + return mycb(product); + } + +and then the Python initialization code needs to contain the lines: + +.. code-block:: python + + @ffi.def_extern() + def mycb(x): + print "hi, I'm called with x =", x + return x * 10 + +This ``@ffi.def_extern`` is attaching a Python function to the C +callback ``mycb``, which in this case is not exported from the DLL. +Nevertheless, the automatic initialization of Python occurs at this +time, if it happens that ``mycb()`` is the first function called +from C. (It does not happen when ``myfunc()`` is called: this is just +a C function, with no extra code magically inserted around it. It +only happens when ``myfunc()`` calls ``mycb()``.) + +As the above explanation hints, this is how ``ffi.embedding_api()`` +actually implements function calls that directly invoke Python code; +we have merely decomposed it explicitly, in order to add some custom C +code in the middle. diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -603,6 +603,9 @@ } """) + +.. _extern-python-ref: + Extern "Python": reference ~~~~~~~~~~~~~~~~~~~~~~~~~~ From pypy.commits at gmail.com Wed Jan 13 09:05:45 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 13 Jan 2016 06:05:45 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: cleanups - kill dead code Message-ID: <569659b9.6408c20a.ea89b.ffff9a78@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81740:f151f6a2e7f9 Date: 2016-01-13 16:04 +0200 http://bitbucket.org/pypy/pypy/changeset/f151f6a2e7f9/ Log: cleanups - kill dead code diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -171,17 +171,14 @@ return (ll_arg,) + ll_args, tok + token @specialize.memo() - def get_ll_trampoline(token, c_version=False): + def get_ll_trampoline(token): """ Used by the trampoline-version only """ if result_class is None: restok = "i" else: restok = "r" - if c_version: - return cintf.make_c_trampoline_function(name, func, token, - restok) - return cintf.make_trampoline_function(name, func, token, restok) + return cintf.make_c_trampoline_function(name, func, token, restok) def decorated_function(*args): # go through the asm trampoline ONLY if we are translated but not @@ -193,23 +190,13 @@ # If we are being JITted, we want to skip the trampoline, else the # JIT cannot see through it. # - if 0: # this is the trampoline case - if we_are_translated() and not jit.we_are_jitted(): - # if we are translated, call the trampoline - unique_id = get_code_fn(*args)._vmprof_unique_id - ll_args, token = lower(*args) - ll_trampoline = get_ll_trampoline(token) - ll_result = ll_trampoline(*ll_args + (unique_id,)) - else: - return func(*args) - else: # this is the case of the stack - if we_are_translated() and not jit.we_are_jitted(): - unique_id = get_code_fn(*args)._vmprof_unique_id - ll_args, token = lower(*args) - ll_trampoline = get_ll_trampoline(token, True) - ll_result = ll_trampoline(*ll_args + (unique_id,)) - else: - return func(*args) + if we_are_translated() and not jit.we_are_jitted(): + unique_id = get_code_fn(*args)._vmprof_unique_id + ll_args, token = lower(*args) + ll_trampoline = get_ll_trampoline(token, True) + ll_result = ll_trampoline(*ll_args + (unique_id,)) + else: + return func(*args) if result_class is not None: return cast_base_ptr_to_instance(result_class, ll_result) else: From pypy.commits at gmail.com Wed Jan 13 09:58:52 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 13 Jan 2016 06:58:52 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added assertion to pool, rewrote assembler of int_mul_ovf Message-ID: <5696662c.d7bc1c0a.46e24.067d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81741:ee366b2e8bfb Date: 2016-01-13 15:57 +0100 http://bitbucket.org/pypy/pypy/changeset/ee366b2e8bfb/ Log: added assertion to pool, rewrote assembler of int_mul_ovf diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -182,6 +182,7 @@ def __init__(self, offset, isfloat=False): AddressLocation.__init__(self, None, None, offset, None) + assert offset >= 0 self.base = 13 self.isfloat = isfloat if self.isfloat: diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -52,60 +52,113 @@ elif l1.is_imm(): self.mc.LGFI(r.SCRATCH, l1) l1 = r.SCRATCH + else: + # we are not allowed to modify l1 if it is not a scratch + # register, thus copy it here! + self.mc.LGR(r.SCRATCH, l1) + l1 = r.SCRATCH mc = self.mc - bc_one_decision = mc.CLGRJ_byte_count +\ - mc.CLGIJ_byte_count + \ - mc.LCGR_byte_count + \ - mc.BRC_byte_count + \ - mc.SPM_byte_count - bc_one_signed = mc.LPGR_byte_count * 2 + \ - mc.MLGR_byte_count + \ - mc.LG_byte_count + \ - bc_one_decision - bc_none_signed = mc.LPGR_byte_count * 2 + \ - mc.MLGR_byte_count + \ - mc.LG_byte_count + \ - mc.CLGRJ_byte_count + \ - mc.CLGIJ_byte_count + \ - mc.BRC_byte_count - bc_set_overflow = mc.OIHL_byte_count + mc.SPM_byte_count # check left neg - mc.CGIJ(lq, l.imm(0), c.LT, l.imm(mc.CGIJ_byte_count*2+mc.BRC_byte_count)) - mc.CGIJ(l1, l.imm(0), c.GE, l.imm(mc.CGIJ_byte_count*2+mc.BRC_byte_count + bc_one_signed)) - mc.BRC(c.ANY, l.imm(mc.BRC_byte_count + mc.CGIJ_byte_count)) # right is negative - mc.CGIJ(l1, l.imm(0), c.LT, l.imm(mc.CGIJ_byte_count + bc_one_signed)) # jump if both are negative - # left or right is negative + jmp_lq_lt_0 = mc.get_relative_pos() + mc.reserve_cond_jump() # CGIJ lq < 0 +-----------+ + jmp_l1_ge_0 = mc.get_relative_pos() # | + mc.reserve_cond_jump() # CGIJ l1 >= 0 -----------|-> (both same sign) + jmp_lq_pos_l1_neg = mc.get_relative_pos() # | + mc.reserve_cond_jump(short=True) # BCR any -----|-> (xor negative) + jmp_l1_neg_lq_neg = mc.get_relative_pos() # | + mc.reserve_cond_jump() # <-----------------------+ + # CGIJ l1 < 0 -> (both same_sign) + # (xor negative) + label_xor_neg = mc.get_relative_pos() + mc.LPGR(lq, lq) + mc.LPGR(l1, l1) + mc.MLGR(lr, l1) + mc.LG(r.SCRATCH, l.pool(self.pool.constant_64_sign_bit)) + # is the value greater than 2**63 ? then an overflow occured + jmp_xor_lq_overflow = mc.get_relative_pos() + mc.reserve_cond_jump() # CLGRJ lq > 0x8000 ... 00 -> (label_overflow) + jmp_xor_lr_overflow = mc.get_relative_pos() + mc.reserve_cond_jump() # CLGIJ lr > 0 -> (label_overflow) + mc.LCGR(lq, lq) # complement the value + mc.SPM(r.SCRATCH) # 0x80 ... 00 clears the condition code and program mask + jmp_no_overflow_xor_neg = mc.get_relative_pos() + mc.reserve_cond_jump(short=True) + + # both are positive/negative + label_both_same_sign = mc.get_relative_pos() mc.LPGR(lq, lq) mc.LPGR(l1, l1) mc.MLGR(lr, l1) mc.LG(r.SCRATCH, l.pool(self.pool.constant_max_64_positive)) - # is the value greater than 2**63 ? then an overflow occured - mc.CLGRJ(lq, r.SCRATCH, c.GT, l.imm(bc_one_decision + bc_none_signed)) # jump to over overflow - mc.CLGIJ(lr, l.imm(0), c.GT, l.imm(bc_one_decision - mc.CLGRJ_byte_count + bc_none_signed)) # jump to overflow - mc.LCGR(lq, lq) - mc.SPM(r.SCRATCH) # 0x80 ... 00 clears the condition code and program mask - mc.BRC(c.ANY, l.imm(mc.BRC_byte_count + bc_set_overflow + bc_none_signed)) # no overflow happened + jmp_lq_overflow = mc.get_relative_pos() + mc.reserve_cond_jump() # CLGRJ lq > 0x7fff ... ff -> (label_overflow) + jmp_lr_overflow = mc.get_relative_pos() + mc.reserve_cond_jump() # CLGIJ lr > 0 -> (label_overflow) + jmp_neither_lqlr_overflow = mc.get_relative_pos() + mc.reserve_cond_jump(short=True) # BRC any -> (label_end) - # both are positive - mc.LPGR(lq, lq) - mc.LPGR(l1, l1) - mc.MLGR(lr, l1) - off = mc.CLGRJ_byte_count + mc.CLGIJ_byte_count + \ - mc.BRC_byte_count - mc.LG(r.SCRATCH, l.pool(self.pool.constant_64_ones)) - mc.CLGRJ(lq, r.SCRATCH, c.GT, l.imm(off)) # jump to over overflow - mc.CLGIJ(lr, l.imm(0), c.GT, l.imm(off - mc.CLGRJ_byte_count)) # jump to overflow - mc.BRC(c.ANY, l.imm(mc.BRC_byte_count + bc_set_overflow)) # no overflow happened # set overflow! - #mc.IPM(r.SCRATCH) + label_overflow = mc.get_relative_pos() + mc.XGR(r.SCRATCH, r.SCRATCH) # set bit 34 & 35 -> indicates overflow mc.OILH(r.SCRATCH, l.imm(0x3000)) # sets OF mc.SPM(r.SCRATCH) # no overflow happended + label_end = mc.get_relative_pos() + + # patch patch patch!!! + + # jmp_lq_lt_0 + pos = jmp_lq_lt_0 + omc = OverwritingBuilder(self.mc, pos, 1) + omc.CGIJ(lq, l.imm(0), c.LT, l.imm(jmp_l1_neg_lq_neg - pos)) + omc.overwrite() + # jmp_l1_ge_0 + pos = jmp_l1_ge_0 + omc = OverwritingBuilder(self.mc, pos, 1) + omc.CGIJ(l1, l.imm(0), c.GE, l.imm(label_both_same_sign - pos)) + omc.overwrite() + # jmp_lq_pos_l1_neg + pos = jmp_lq_pos_l1_neg + omc = OverwritingBuilder(self.mc, pos, 1) + omc.BRC(c.ANY, l.imm(label_xor_neg - pos)) + omc.overwrite() + # jmp_l1_neg_lq_neg + pos = jmp_l1_neg_lq_neg + omc = OverwritingBuilder(self.mc, pos, 1) + omc.CGIJ(l1, l.imm(0), c.LT, l.imm(label_both_same_sign - pos)) + omc.overwrite() + + # patch jmp_xor_lq_overflow + pos = jmp_xor_lq_overflow + omc = OverwritingBuilder(self.mc, pos, 1) + omc.CLGRJ(lq, r.SCRATCH, c.GT, l.imm(label_overflow - pos)) + omc.overwrite() + # patch jmp_xor_lr_overflow + pos = jmp_xor_lr_overflow + omc = OverwritingBuilder(self.mc, pos, 1) + omc.CLGIJ(lr, l.imm(0), c.GT, l.imm(label_overflow - pos)) + omc.overwrite() + # patch jmp_no_overflow_xor_neg + omc = OverwritingBuilder(self.mc, jmp_no_overflow_xor_neg, 1) + omc.BRC(c.ANY, l.imm(label_end - jmp_no_overflow_xor_neg)) + omc.overwrite() + # patch jmp_lq_overflow + omc = OverwritingBuilder(self.mc, jmp_lq_overflow, 1) + omc.CLGRJ(lq, r.SCRATCH, c.GT, l.imm(label_overflow - jmp_lq_overflow)) + omc.overwrite() + # patch jmp_lr_overflow + omc = OverwritingBuilder(self.mc, jmp_lr_overflow, 1) + omc.CLGIJ(lr, l.imm(0), c.GT, l.imm(label_overflow - jmp_lr_overflow)) + omc.overwrite() + # patch jmp_neither_lqlr_overflow + omc = OverwritingBuilder(self.mc, jmp_neither_lqlr_overflow, 1) + omc.BRC(c.ANY, l.imm(label_end - jmp_neither_lqlr_overflow)) + omc.overwrite() emit_int_floordiv = gen_emit_pool_or_rr_evenodd('DSG','DSGR') emit_uint_floordiv = gen_emit_pool_or_rr_evenodd('DLG','DLGR') From pypy.commits at gmail.com Wed Jan 13 12:21:38 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Wed, 13 Jan 2016 09:21:38 -0800 (PST) Subject: [pypy-commit] pypy repeatlist_strategy: merge default Message-ID: <569687a2.552f1c0a.4e6bc.0c0b@mx.google.com> Author: Vincent Legoll Branch: repeatlist_strategy Changeset: r81742:215b35bbf061 Date: 2016-01-13 13:58 +0100 http://bitbucket.org/pypy/pypy/changeset/215b35bbf061/ Log: merge default diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -331,11 +331,8 @@ class RandomEffectsAnalyzer(BoolGraphAnalyzer): def analyze_external_call(self, funcobj, seen=None): - try: - if funcobj.random_effects_on_gcobjs: - return True - except AttributeError: - return True # better safe than sorry + if funcobj.random_effects_on_gcobjs: + return True return super(RandomEffectsAnalyzer, self).analyze_external_call( funcobj, seen) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -36,7 +36,7 @@ return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) def analyze_external_call(self, funcobj, seen=None): - if getattr(funcobj, 'random_effects_on_gcobjs', False): + if funcobj.random_effects_on_gcobjs: return True return graphanalyze.BoolGraphAnalyzer.analyze_external_call( self, funcobj, seen) diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py --- a/rpython/rlib/rstacklet.py +++ b/rpython/rlib/rstacklet.py @@ -1,7 +1,7 @@ import sys from rpython.rlib import _rffi_stacklet as _c from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import fetch_translated_config from rpython.rtyper.lltypesystem import lltype, llmemory DEBUG = False @@ -10,8 +10,8 @@ class StackletThread(object): @jit.dont_look_inside - def __init__(self, config): - self._gcrootfinder = _getgcrootfinder(config, we_are_translated()) + def __init__(self, _argument_ignored_for_backward_compatibility=None): + self._gcrootfinder = _getgcrootfinder(fetch_translated_config()) self._thrd = _c.newthread() if not self._thrd: raise MemoryError @@ -67,11 +67,8 @@ # ____________________________________________________________ -def _getgcrootfinder(config, translated): - if translated: - assert config is not None, ("you have to pass a valid config, " - "e.g. from 'driver.config'") - elif '__pypy__' in sys.builtin_module_names: +def _getgcrootfinder(config): + if config is None and '__pypy__' in sys.builtin_module_names: import py py.test.skip("cannot run the stacklet tests on top of pypy: " "calling directly the C function stacklet_switch() " diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py --- a/rpython/rlib/test/test_rstacklet.py +++ b/rpython/rlib/test/test_rstacklet.py @@ -17,10 +17,9 @@ class Runner: STATUSMAX = 5000 - config = None def init(self, seed): - self.sthread = rstacklet.StackletThread(self.config) + self.sthread = rstacklet.StackletThread() self.random = rrandom.Random(seed) def done(self): @@ -301,12 +300,11 @@ config.translation.gcrootfinder = cls.gcrootfinder GCROOTFINDER = cls.gcrootfinder cls.config = config - cls.old_values = Runner.config, Runner.STATUSMAX - Runner.config = config + cls.old_status_max = Runner.STATUSMAX Runner.STATUSMAX = 25000 def teardown_class(cls): - Runner.config, Runner.STATUSMAX = cls.old_values + Runner.STATUSMAX = cls.old_status_max def test_demo1(self): t, cbuilder = self.compile(entry_point) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -22,7 +22,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, - Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, + Ptr, ContainerType, FuncType, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive, getfunctionptr) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError from rpython.rtyper import rclass @@ -876,18 +876,6 @@ return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) - def genexternalcall(self, fnname, args_v, resulttype=None, **flags): - if isinstance(resulttype, Repr): - resulttype = resulttype.lowleveltype - argtypes = [v.concretetype for v in args_v] - FUNCTYPE = FuncType(argtypes, resulttype or Void) - f = functionptr(FUNCTYPE, fnname, **flags) - cf = inputconst(typeOf(f), f) - return self.genop('direct_call', [cf]+list(args_v), resulttype) - - def gencapicall(self, cfnname, args_v, resulttype=None, **flags): - return self.genexternalcall(cfnname, args_v, resulttype=resulttype, external="CPython", **flags) - def genconst(self, ll_value): return inputconst(typeOf(ll_value), ll_value) diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,5 +1,4 @@ from rpython.rtyper.lltypesystem.lltype import DelayedPointer -from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -80,13 +79,20 @@ funcobj = op.args[0].value._obj except DelayedPointer: return self.top_result() + if funcobj is None: + # We encountered a null pointer. Calling it will crash. + # However, the call could be on a dead path, so we return the + # bottom result here. + return self.bottom_result() if getattr(funcobj, 'external', None) is not None: x = self.analyze_external_call(funcobj, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x - graph = get_graph(op.args[0], self.translator) - assert graph is not None + try: + graph = funcobj.graph + except AttributeError: + return self.top_result() x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) diff --git a/rpython/translator/backendopt/test/test_graphanalyze.py b/rpython/translator/backendopt/test/test_graphanalyze.py --- a/rpython/translator/backendopt/test/test_graphanalyze.py +++ b/rpython/translator/backendopt/test/test_graphanalyze.py @@ -65,3 +65,14 @@ op = SpaceOperation('direct_call', [c_f], None) analyzer = BoolGraphAnalyzer(t) assert analyzer.analyze(op) + + +def test_null_fnptr(): + from rpython.flowspace.model import SpaceOperation, Constant + from rpython.rtyper.lltypesystem.lltype import Void, FuncType, nullptr + from rpython.translator.translator import TranslationContext + t = TranslationContext() + fnptr = nullptr(FuncType([], Void)) + op = SpaceOperation('direct_call', [Constant(fnptr)], None) + analyzer = BoolGraphAnalyzer(t) + assert not analyzer.analyze(op) diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -24,22 +24,13 @@ if not isinstance(f, lltype._ptr): return None try: - funcobj = f._getobj() + funcobj = f._obj except lltype.DelayedPointer: return None try: - callable = funcobj._callable - except (AttributeError, KeyError, AssertionError): - return None - try: return funcobj.graph except AttributeError: return None - try: - callable = funcobj._callable - return translator._graphof(callable) - except (AttributeError, KeyError, AssertionError): - return None def replace_exitswitch_by_constant(block, const): From pypy.commits at gmail.com Wed Jan 13 12:41:37 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Jan 2016 09:41:37 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: Use LEA, as discussed on irc Message-ID: <56968c51.034cc20a.70ce7.ffffea51@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81743:d718f341bf5f Date: 2016-01-13 18:40 +0100 http://bitbucket.org/pypy/pypy/changeset/d718f341bf5f/ Log: Use LEA, as discussed on irc diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -840,8 +840,7 @@ def _call_header_vmprof(self): stack = rffi.cast(lltype.Signed, _get_vmprof().cintf.vmprof_address_of_global_stack()) - self.mc.MOV_rr(eax.value, esp.value) - self.mc.ADD_ri(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) # er makes no sense + self.mc.LEA_rs(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) # next self.mc.MOV(ecx, heap(stack)) self.mc.MOV_mr((eax.value, 0), ecx.value) From pypy.commits at gmail.com Wed Jan 13 14:57:01 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Jan 2016 11:57:01 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Finalize and review embedding.rst. Message-ID: <5696ac0d.a85fc20a.6917f.223a@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2576:c251068e143e Date: 2016-01-13 20:56 +0100 http://bitbucket.org/cffi/cffi/changeset/c251068e143e/ Log: Finalize and review embedding.rst. diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -4,72 +4,123 @@ .. contents:: -From *version 1.5,* you can use CFFI to generate a ``.so/.dll`` which -exports the API of your choice to any C application that wants to link -with this ``.so/.dll``. +You can use CFFI to generate a ``.so/.dll`` which exports the API of +your choice to any C application that wants to link with this +``.so/.dll``. + +This is entirely *new in version 1.5.* Usage ----- +.. __: overview.html#embedding + See the `paragraph in the overview page`__ for a quick introduction. -In this section, we explain every step in more details. We call *DLL* -the dynamically-loaded library that we are producing; it is a file -with the (default) extension ``.dll`` on Windows or ``.so`` on other -platforms. As usual, it is produced by generating some intermediate -``.c`` code and then calling the regular platform-specific C compiler. +In this section, we explain every step in more details. We will use +here this slightly expanded example: -.. __: overview.html#embedding +.. code-block:: c + + /* file plugin.h */ + typedef struct { int x, y; } point_t; + extern int do_stuff(point_t *); + +.. code-block:: python + + # file plugin_build.py + import cffi + ffi = cffi.FFI() + + with open('plugin.h') as f: + ffi.embedding_api(f.read()) + + ffi.set_source("my_plugin", ''' + #include "plugin.h" + ''') + + ffi.embedding_init_code(""" + from my_plugin import ffi + + @ffi.def_extern() + def do_stuff(p): + print("adding %d and %d" % (p.x, p.y)) + return p.x + p.y + """) + + ffi.compile(target="plugin-1.5.*", verbose=True) + +Running the code above produces a *DLL*, i,e, a dynamically-loadable +library. It is a file with the extension ``.dll`` on Windows or +``.so`` on other platforms. As usual, it is produced by generating +some intermediate ``.c`` code and then calling the regular +platform-specific C compiler. + +Here are some details about the methods used above: * **ffi.embedding_api(source):** parses the given C source, which declares functions that you want to be exported by the DLL. It can also declare types, constants and global variables that are part of the C-level API of your DLL. - The functions are automatically defined in the ``.c`` file: they - contain code that initializes the Python interpreter the first time - any of them is called, followed by code to call the attached - Python function (with ``@ffi.def_extern()``, see next point). + The functions that are found in ``source`` will be automatically + defined in the ``.c`` file: they will contain code that initializes + the Python interpreter the first time any of them is called, + followed by code to call the attached Python function (with + ``@ffi.def_extern()``, see next point). The global variables, on the other hand, are not automatically - produced; you have to write their definition explicitly in - ``ffi.set_source()``, as regular C code. + produced. You have to write their definition explicitly in + ``ffi.set_source()``, as regular C code (see the point after next). * **ffi.embedding_init_code(python_code):** this gives initialization-time Python source code. This code is copied inside the DLL. At runtime, the code is executed when the DLL is first initialized, just after Python itself is initialized. This newly - initialized Python interpreter has got the DLL ready to be imported, - typically with a line like ``from module_name import ffi, lib`` - (where ``module_name`` is the name given in first argument to - ``ffi.set_source()``). + initialized Python interpreter has got an extra module ready to be + imported, typically with a line like "``from my_plugin import ffi, + lib``". The name ``my_plugin`` comes from the first argument to + ``ffi.set_source()``. (This module represents "the caller's C + world" from the point of view of Python.) - This Python code can import other modules or packages as usual (it - might need to set up ``sys.path`` first). You should use the - decorator ``@ffi.def_extern()`` to attach a Python function to each - of the C functions declared within ``ffi.embedding_api()``. (If you - don't, calling the C function results for now in a message printed - to stderr and a zero return value.) + The initialization-time Python code can import other modules or + packages as usual (it might need to set up ``sys.path`` first). For + every function declared within ``ffi.embedding_api()``, it should + use the decorator ``@ffi.def_extern()`` to attach a corresponding + Python function to it. (Of course, the decorator can appear either + directly in the initialization-time Python code, or in any other + module that it imports. The usual Python rules apply, e.g. you need + "``from my_plugin import ffi``" in a module, otherwise you can't say + ``@ffi.def_extern()``.) -* **ffi.set_source(module_name, c_code):** set the name of the module - from Python's point of view. It also gives more C code which will - be included in the generated C code. In simple examples it can be - an empty string. It is where you would ``#include`` some other - files, define global variables, and so on. The macro + If the initialization-time Python code fails with an exception, then + you get tracebacks printed to stderr. If some function remains + unattached but the C code calls it, an error message is also printed + to stderr and the function returns zero/null. + +* **ffi.set_source(c_module_name, c_code):** set the name of the + module from Python's point of view. It also gives more C code which + will be included in the generated C code. In trivial examples it + can be an empty string. It is where you would ``#include`` some + other files, define global variables, and so on. The macro ``CFFI_DLLEXPORT`` is available to this C code: it expands to the platform-specific way of saying "the following declaration should be - exported from the DLL". For example, you would put "``int + exported from the DLL". For example, you would put "``extern int my_glob;``" in ``ffi.embedding_api()`` and "``CFFI_DLLEXPORT int my_glob = 42;``" in ``ffi.set_source()``. - + + Currently, any *type* declared in ``ffi.embedding_api()`` must also + be present in the ``c_code``. This is automatic if this code + contains a line like ``#include "plugin.h"`` in the example above. + * **ffi.compile([target=...] [, verbose=True]):** make the C code and compile it. By default, it produces a file called - ``module_name.dll`` or ``module_name.so``, but the default can be - changed with the optional ``target`` keyword argument. You can use - ``target="foo.*"`` with a literal ``*`` to ask for a file called - ``foo.dll`` on Windows or ``foo.so`` elsewhere. (The ``target`` - file name can contain characters not usually allowed in Python - module names.) + ``c_module_name.dll`` or ``c_module_name.so``, but the default can + be changed with the optional ``target`` keyword argument. You can + use ``target="foo.*"`` with a literal ``*`` to ask for a file called + ``foo.dll`` on Windows or ``foo.so`` elsewhere. (One point of the + separate ``target`` file name is to include characters not usually + allowed in Python module names, like "``plugin-1.5.*``".) For more complicated cases, you can call instead ``ffi.emit_c_code("foo.c")`` and compile the resulting ``foo.c`` @@ -82,10 +133,57 @@ More reading ------------ -(XXX should point to a few places in the rest of the CFFI docs where -people starting from embedding would like to go next) +If you're reading this page about embedding and you are not familiar +with CFFI already, here are a few pointers to what you could read +next: -XXX copy the content of ffi.embedding() to a .h +* For the ``@ffi.def_extern()`` functions, integer C types are passed + simply as Python integers; and simple pointers-to-struct and basic + arrays are all straightforward enough. However, sooner or later you + will need to read about this topic in more details here__. + +* ``@ffi.def_extern()``: see `documentation here,`__ notably on what + happens if the Python function raises an exception. + +* In embedding mode, the major direction is C code that calls Python + functions. This is the opposite of the regular extending mode of + CFFI, in which the major direction is Python code calling C. That's + why the page `Using the ffi/lib objects`_ talks first about the + latter, and why the direction "C code that calls Python" is + generally referred to as "callbacks" in that page. (If you also + need to have your Python code call C code, read more about + `Embedding and Extending`_ below.) + +* ``ffi.embedding_api(source)``: follows the same syntax as + ``ffi.cdef()``, `documented here.`__ You can use the "``...``" + syntax as well, although in practice it may be less useful than it + is for ``cdef()``. On the other hand, it is expected that often the + C sources that you need to give to ``ffi.embedding_api()`` would be + exactly the same as the content of some ``.h`` file that you want to + give to users of your DLL. That's why the example above does this:: + + with open('foo.h') as f: + ffi.embedding(f.read()) + + Note that a drawback of this approach is that ``ffi.embedding()`` + doesn't support ``#ifdef`` directives. You may have to use a more + convoluted expression like:: + + with open('foo.h') as f: + lines = [line for line in f if not line.startswith('#')] + ffi.embedding(''.join(lines)) + + As in the example above, you can also use the same ``foo.h`` from + ``ffi.set_source()``:: + + ffi.set_source('module_name', '#include "foo.h"') + + +.. __: using.html#working +.. __: using.html#def-extern +.. __: cdef.html#cdef + +.. _`Using the ffi/lib objects`: using.html Embedding and Extending @@ -98,29 +196,19 @@ variables declared in ``ffi.embedding_api()`` (it is how you should read/write the global variables from Python). -But you can use ``ffi.cdef()`` *in addition to* -``ffi.embedding_api()`` to exchange more C functions and global -variables between C and Python, without also making them exports of -the DLL. See `here for more about cdef.`__ +You can use *both* ``ffi.embedding_api()`` and ``ffi.cdef()`` in the +same build script. You put in the former the declarations you want to +be exported by the DLL; you put in the latter only the C functions and +types that you want to share between C and Python, but not export from +the DLL. -.. __: cdef.html#cdef - -``ffi.cdef()`` is used to access functions and variables that you can, -and should, define as ``static`` in ``set_source()``. On the other -hand, the C functions declared with ``ffi.embedding_api()`` work -similarly to ``extern "Python"`` functions from ``ffi.cdef()``. -See `here for more about extern "Python".`__ See `here for details -about @ffi.def_extern().`__ - -.. __: using.html#extern-python -.. __: using.html#extern-python-ref - -In some cases, you want to write a DLL-exported C function in C -directly, maybe to handle some cases before calling Python functions. -To do that, you must *not* write the function's signature in -``ffi.embedding_api()``. You must only write the custom function -definition in ``ffi.set_source()``, and prefix it with the macro -CFFI_DLLEXPORT: +As an example of that, consider the case where you would like to have +a DLL-exported C function written in C directly, maybe to handle some +cases before calling Python functions. To do that, you must *not* put +the function's signature in ``ffi.embedding_api()``. (Note that this +requires more hacks if you use ``ffi.embedding(f.read())``.) You must +only write the custom function definition in ``ffi.set_source()``, and +prefix it with the macro CFFI_DLLEXPORT: .. code-block:: c @@ -131,20 +219,25 @@ This function can, if it wants, invoke Python functions using the general mechanism of "callbacks" (technically a call from C to Python, -although in this case it is not calling anything back): you need a -``ffi.cdef()`` with "``extern "Python" int mycb(int);``", and then you -can write this in ``ffi.set_source()``: +although in this case it is not calling anything back): -.. code-block:: c +.. code-block:: python - static int mycb(int); /* the callback: forward declaration, to make - it accessible from the C code that follows */ + ffi.cdef(""" + extern "Python" int mycb(int); + """) - CFFI_DLLEXPORT int myfunc(int a, int b) - { - int product = a * b; /* some custom C code */ - return mycb(product); - } + ffi.set_source("my_plugin", """ + + static int mycb(int); /* the callback: forward declaration, to make + it accessible from the C code that follows */ + + CFFI_DLLEXPORT int myfunc(int a, int b) + { + int product = a * b; /* some custom C code */ + return mycb(product); + } + """) and then the Python initialization code needs to contain the lines: @@ -165,5 +258,12 @@ As the above explanation hints, this is how ``ffi.embedding_api()`` actually implements function calls that directly invoke Python code; -we have merely decomposed it explicitly, in order to add some custom C -code in the middle. +here, we have merely decomposed it explicitly, in order to add some +custom C code in the middle. + +In case you need to force, from C code, Python to be initialized +before the first ``@ffi.def_extern()`` is called, you can do so by +calling the C function ``cffi_start_python()`` with no argument. It +returns an integer, 0 or -1, to tell if the initialization succeeded +or not. Currently there is no way to prevent a failing initialization +from also dumping a traceback and more information to stderr. diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -604,8 +604,6 @@ """) -.. _extern-python-ref: - Extern "Python": reference ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -632,6 +630,8 @@ return a default value. This can be controlled with ``error`` and ``onerror``, described below. +.. _def-extern: + The ``@ffi.def_extern()`` decorator takes these optional arguments: * ``name``: the name of the function as written in the cdef. By default From pypy.commits at gmail.com Wed Jan 13 15:39:50 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Jan 2016 12:39:50 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Fix some comments by fijal Message-ID: <5696b616.82e11c0a.48504.ffffe9f6@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2577:8fa58724b6ad Date: 2016-01-13 21:39 +0100 http://bitbucket.org/cffi/cffi/changeset/8fa58724b6ad/ Log: Fix some comments by fijal diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -8,6 +8,22 @@ your choice to any C application that wants to link with this ``.so/.dll``. +The general idea is as follows: + +* You write and execute a Python script, which produces a ``.so/.dll`` + file with the API of your choice. The script also gives some Python + code to be "frozen" inside the ``.so``. + +* At runtime, the C application loads this ``.so/.dll`` without having + to know that it was produced by Python and CFFI. + +* The first time a C function is called, Python is initialized and + the frozen Python code is executed. + +* The frozen Python code attaches Python functions that implement the + C functions of your API, which are then used for all subsequent C + function calls. + This is entirely *new in version 1.5.* @@ -74,29 +90,31 @@ ``ffi.set_source()``, as regular C code (see the point after next). * **ffi.embedding_init_code(python_code):** this gives - initialization-time Python source code. This code is copied inside - the DLL. At runtime, the code is executed when the DLL is first - initialized, just after Python itself is initialized. This newly - initialized Python interpreter has got an extra module ready to be - imported, typically with a line like "``from my_plugin import ffi, + initialization-time Python source code. This code is copied + ("frozen") inside the DLL. At runtime, the code is executed when + the DLL is first initialized, just after Python itself is + initialized. This newly initialized Python interpreter has got an + extra "built-in" module that will be loaded magically without + accessing any files, with a line like "``from my_plugin import ffi, lib``". The name ``my_plugin`` comes from the first argument to - ``ffi.set_source()``. (This module represents "the caller's C - world" from the point of view of Python.) + ``ffi.set_source()``. This module represents "the caller's C world" + from the point of view of Python. The initialization-time Python code can import other modules or - packages as usual (it might need to set up ``sys.path`` first). For - every function declared within ``ffi.embedding_api()``, it should - use the decorator ``@ffi.def_extern()`` to attach a corresponding - Python function to it. (Of course, the decorator can appear either - directly in the initialization-time Python code, or in any other - module that it imports. The usual Python rules apply, e.g. you need - "``from my_plugin import ffi``" in a module, otherwise you can't say - ``@ffi.def_extern()``.) + packages as usual. You may have typical Python issues like needing + to set up ``sys.path`` somehow manually first. + + For every function declared within ``ffi.embedding_api()``, the + initialization-time Python code or one of the modules it imports + should use the decorator ``@ffi.def_extern()`` to attach a + corresponding Python function to it. If the initialization-time Python code fails with an exception, then - you get tracebacks printed to stderr. If some function remains - unattached but the C code calls it, an error message is also printed - to stderr and the function returns zero/null. + you get a traceback printed to stderr, along with more information + to help you identify problems like wrong ``sys.path``. If some + function remains unattached at the time where the C code tries to + call it, an error message is also printed to stderr and the function + returns zero/null. * **ffi.set_source(c_module_name, c_code):** set the name of the module from Python's point of view. It also gives more C code which @@ -118,9 +136,9 @@ ``c_module_name.dll`` or ``c_module_name.so``, but the default can be changed with the optional ``target`` keyword argument. You can use ``target="foo.*"`` with a literal ``*`` to ask for a file called - ``foo.dll`` on Windows or ``foo.so`` elsewhere. (One point of the - separate ``target`` file name is to include characters not usually - allowed in Python module names, like "``plugin-1.5.*``".) + ``foo.dll`` on Windows or ``foo.so`` elsewhere. One reason for + specifying an alternate ``target`` is to include characters not + usually allowed in Python module names, like "``plugin-1.5.*``". For more complicated cases, you can call instead ``ffi.emit_c_code("foo.c")`` and compile the resulting ``foo.c`` @@ -150,9 +168,9 @@ CFFI, in which the major direction is Python code calling C. That's why the page `Using the ffi/lib objects`_ talks first about the latter, and why the direction "C code that calls Python" is - generally referred to as "callbacks" in that page. (If you also + generally referred to as "callbacks" in that page. If you also need to have your Python code call C code, read more about - `Embedding and Extending`_ below.) + `Embedding and Extending`_ below. * ``ffi.embedding_api(source)``: follows the same syntax as ``ffi.cdef()``, `documented here.`__ You can use the "``...``" @@ -190,11 +208,7 @@ ----------------------- The embedding mode is not incompatible with the non-embedding mode of -CFFI. The Python code can import not only ``ffi`` but also ``lib`` -from the module you define. This ``lib`` contains all the C symbols -that are available to Python. This includes all functions and global -variables declared in ``ffi.embedding_api()`` (it is how you should -read/write the global variables from Python). +CFFI. You can use *both* ``ffi.embedding_api()`` and ``ffi.cdef()`` in the same build script. You put in the former the declarations you want to @@ -218,8 +232,9 @@ } This function can, if it wants, invoke Python functions using the -general mechanism of "callbacks" (technically a call from C to Python, -although in this case it is not calling anything back): +general mechanism of "callbacks"---called this way because it is a +call from C to Python, although in this case it is not calling +anything back: .. code-block:: python @@ -252,9 +267,10 @@ callback ``mycb``, which in this case is not exported from the DLL. Nevertheless, the automatic initialization of Python occurs at this time, if it happens that ``mycb()`` is the first function called -from C. (It does not happen when ``myfunc()`` is called: this is just -a C function, with no extra code magically inserted around it. It -only happens when ``myfunc()`` calls ``mycb()``.) +from C. More precisely, it does not happen when ``myfunc()`` is +called: this is just a C function, with no extra code magically +inserted around it. It only happens when ``myfunc()`` calls +``mycb()``. As the above explanation hints, this is how ``ffi.embedding_api()`` actually implements function calls that directly invoke Python code; From pypy.commits at gmail.com Wed Jan 13 15:44:54 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 13 Jan 2016 12:44:54 -0800 (PST) Subject: [pypy-commit] pypy default: another assert: we only re-size the ._items of an array info if it is not Message-ID: <5696b746.247bc20a.2f1f.3004@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81744:283ba71d08ea Date: 2016-01-09 00:35 +0100 http://bitbucket.org/pypy/pypy/changeset/283ba71d08ea/ Log: another assert: we only re-size the ._items of an array info if it is not virtual diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -528,6 +528,7 @@ if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): + assert not self.is_virtual() self._items = self._items + [None] * (index - len(self._items) + 1) self._items[index] = op if cf is not None: From pypy.commits at gmail.com Wed Jan 13 15:44:56 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 13 Jan 2016 12:44:56 -0800 (PST) Subject: [pypy-commit] pypy default: rename lazy_setfield to lazy_set (since it also means "setarrayitem") Message-ID: <5696b748.022f1c0a.26c91.ffff8a33@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81745:fc60c9c667a2 Date: 2016-01-09 00:41 +0100 http://bitbucket.org/pypy/pypy/changeset/fc60c9c667a2/ Log: rename lazy_setfield to lazy_set (since it also means "setarrayitem") diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -32,8 +32,8 @@ # 1. 'cached_infos' is a list listing all the infos that are # caching this descr # - # 2. we just did one setfield, which is delayed (and thus - # not synchronized). 'lazy_setfield' is the delayed + # 2. we just did one set(field/arrayitem), which is delayed (and thus + # not synchronized). '_lazy_set' is the delayed # ResOperation. In this state, 'cached_infos' contains # out-of-date information. More precisely, the field # value pending in the ResOperation is *not* visible in @@ -41,7 +41,7 @@ # self.cached_infos = [] self.cached_structs = [] - self._lazy_setfield = None + self._lazy_set = None def register_info(self, structop, info): # invariant: every struct or array ptr info, that is not virtual and @@ -53,27 +53,27 @@ def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): - assert self._lazy_setfield is None + assert self._lazy_set is None for i, info in enumerate(self.cached_infos): structbox = optimizer.get_box_replacement(self.cached_structs[i]) info.produce_short_preamble_ops(structbox, descr, index, optimizer, shortboxes) def possible_aliasing(self, optheap, opinfo): - # If lazy_setfield is set and contains a setfield on a different + # If lazy_set is set and contains a setfield on a different # structvalue, then we are annoyed, because it may point to either # the same or a different structure at runtime. # XXX constants? - return (self._lazy_setfield is not None + return (self._lazy_set is not None and (not optheap.getptrinfo( - self._lazy_setfield.getarg(0)).same_info(opinfo))) + self._lazy_set.getarg(0)).same_info(opinfo))) def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. structinfo = optheap.ensure_ptr_info_arg0(op) arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): - self.force_lazy_setfield(optheap, op.getdescr()) + self.force_lazy_set(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) cached_field = self._getfield(structinfo, op.getdescr(), optheap, False) if cached_field is not None: @@ -86,27 +86,27 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield - self._lazy_setfield = op + # common case: store the 'op' as lazy_set + self._lazy_set = op else: # this is the case where the pending setfield ends up # storing precisely the value that is already there, # as proved by 'cached_fields'. In this case, we don't - # need any _lazy_setfield: the heap value is already right. - # Note that this may reset to None a non-None lazy_setfield, + # need any _lazy_set: the heap value is already right. + # Note that this may reset to None a non-None lazy_set, # cancelling its previous effects with no side effect. # Now, we have to force the item in the short preamble self._getfield(structinfo, op.getdescr(), optheap) - self._lazy_setfield = None + self._lazy_set = None def getfield_from_cache(self, optheap, opinfo, descr): # Returns the up-to-date field's value, or None if not cached. if self.possible_aliasing(optheap, opinfo): - self.force_lazy_setfield(optheap, descr) - if self._lazy_setfield is not None: - op = self._lazy_setfield + self.force_lazy_set(optheap, descr) + if self._lazy_set is not None: + op = self._lazy_set return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) @@ -114,15 +114,15 @@ return res.get_box_replacement() return None - def force_lazy_setfield(self, optheap, descr, can_cache=True): - op = self._lazy_setfield + def force_lazy_set(self, optheap, descr, can_cache=True): + op = self._lazy_set if op is not None: - # This is the way _lazy_setfield is usually reset to None. + # This is the way _lazy_set is usually reset to None. # Now we clear _cached_fields, because actually doing the # setfield might impact any of the stored result (because of # possible aliasing). self.invalidate(descr) - self._lazy_setfield = None + self._lazy_set = None if optheap.postponed_op: for a in op.getarglist(): if a is optheap.postponed_op: @@ -250,7 +250,7 @@ def flush(self): self.cached_dict_reads.clear() self.corresponding_array_descrs.clear() - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.emit_postponed_op() def emit_postponed_op(self): @@ -326,7 +326,7 @@ return if op.is_guard(): self.optimizer.pendingfields = ( - self.force_lazy_setfields_and_arrayitems_for_guard()) + self.force_lazy_sets_for_guard()) return opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or # handled specially @@ -354,7 +354,7 @@ if not effectinfo.has_random_effects(): self.force_from_effectinfo(effectinfo) return - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.clean_caches() def optimize_CALL_I(self, op): @@ -432,7 +432,7 @@ # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) + self.force_lazy_set(fielddescr) for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: @@ -442,7 +442,7 @@ del self.cached_dict_reads[fielddescr] except KeyError: pass - self.force_lazy_setfield(fielddescr, can_cache=False) + self.force_lazy_set(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) if arraydescr in self.corresponding_array_descrs: @@ -453,16 +453,16 @@ pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) + self.force_lazy_set(vrefinfo.descr_forced) # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. - def force_lazy_setfield(self, descr, can_cache=True): + def force_lazy_set(self, descr, can_cache=True): try: cf = self.cached_fields[descr] except KeyError: return - cf.force_lazy_setfield(self, descr, can_cache) + cf.force_lazy_set(self, descr, can_cache) def force_lazy_setarrayitem(self, arraydescr, indexb=None, can_cache=True): try: @@ -471,35 +471,35 @@ return for idx, cf in submap.iteritems(): if indexb is None or indexb.contains(idx): - cf.force_lazy_setfield(self, None, can_cache) + cf.force_lazy_set(self, None, can_cache) - def force_all_lazy_setfields_and_arrayitems(self): + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for submap in self.cached_arrayitems.itervalues(): for index, cf in submap.iteritems(): - cf.force_lazy_setfield(self, None) + cf.force_lazy_set(self, None) - def force_lazy_setfields_and_arrayitems_for_guard(self): + def force_lazy_sets_for_guard(self): pendingfields = [] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue val = op.getarg(1) if self.optimizer.is_virtual(val): pendingfields.append(op) continue - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for descr, submap in self.cached_arrayitems.iteritems(): for index, cf in submap.iteritems(): - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue # the only really interesting case that we need to handle in the @@ -511,7 +511,7 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): From pypy.commits at gmail.com Wed Jan 13 15:44:58 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 13 Jan 2016 12:44:58 -0800 (PST) Subject: [pypy-commit] pypy default: merge Message-ID: <5696b74a.034cc20a.70ce7.2ac8@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81746:80bb10086af7 Date: 2016-01-13 21:43 +0100 http://bitbucket.org/pypy/pypy/changeset/80bb10086af7/ Log: merge diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -32,8 +32,8 @@ # 1. 'cached_infos' is a list listing all the infos that are # caching this descr # - # 2. we just did one setfield, which is delayed (and thus - # not synchronized). 'lazy_setfield' is the delayed + # 2. we just did one set(field/arrayitem), which is delayed (and thus + # not synchronized). '_lazy_set' is the delayed # ResOperation. In this state, 'cached_infos' contains # out-of-date information. More precisely, the field # value pending in the ResOperation is *not* visible in @@ -41,7 +41,7 @@ # self.cached_infos = [] self.cached_structs = [] - self._lazy_setfield = None + self._lazy_set = None def register_info(self, structop, info): # invariant: every struct or array ptr info, that is not virtual and @@ -53,27 +53,27 @@ def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): - assert self._lazy_setfield is None + assert self._lazy_set is None for i, info in enumerate(self.cached_infos): structbox = optimizer.get_box_replacement(self.cached_structs[i]) info.produce_short_preamble_ops(structbox, descr, index, optimizer, shortboxes) def possible_aliasing(self, optheap, opinfo): - # If lazy_setfield is set and contains a setfield on a different + # If lazy_set is set and contains a setfield on a different # structvalue, then we are annoyed, because it may point to either # the same or a different structure at runtime. # XXX constants? - return (self._lazy_setfield is not None + return (self._lazy_set is not None and (not optheap.getptrinfo( - self._lazy_setfield.getarg(0)).same_info(opinfo))) + self._lazy_set.getarg(0)).same_info(opinfo))) def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. structinfo = optheap.ensure_ptr_info_arg0(op) arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): - self.force_lazy_setfield(optheap, op.getdescr()) + self.force_lazy_set(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) cached_field = self._getfield(structinfo, op.getdescr(), optheap, False) if cached_field is not None: @@ -86,27 +86,27 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield - self._lazy_setfield = op + # common case: store the 'op' as lazy_set + self._lazy_set = op else: # this is the case where the pending setfield ends up # storing precisely the value that is already there, # as proved by 'cached_fields'. In this case, we don't - # need any _lazy_setfield: the heap value is already right. - # Note that this may reset to None a non-None lazy_setfield, + # need any _lazy_set: the heap value is already right. + # Note that this may reset to None a non-None lazy_set, # cancelling its previous effects with no side effect. # Now, we have to force the item in the short preamble self._getfield(structinfo, op.getdescr(), optheap) - self._lazy_setfield = None + self._lazy_set = None def getfield_from_cache(self, optheap, opinfo, descr): # Returns the up-to-date field's value, or None if not cached. if self.possible_aliasing(optheap, opinfo): - self.force_lazy_setfield(optheap, descr) - if self._lazy_setfield is not None: - op = self._lazy_setfield + self.force_lazy_set(optheap, descr) + if self._lazy_set is not None: + op = self._lazy_set return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) @@ -114,15 +114,15 @@ return res.get_box_replacement() return None - def force_lazy_setfield(self, optheap, descr, can_cache=True): - op = self._lazy_setfield + def force_lazy_set(self, optheap, descr, can_cache=True): + op = self._lazy_set if op is not None: - # This is the way _lazy_setfield is usually reset to None. + # This is the way _lazy_set is usually reset to None. # Now we clear _cached_fields, because actually doing the # setfield might impact any of the stored result (because of # possible aliasing). self.invalidate(descr) - self._lazy_setfield = None + self._lazy_set = None if optheap.postponed_op: for a in op.getarglist(): if a is optheap.postponed_op: @@ -250,7 +250,7 @@ def flush(self): self.cached_dict_reads.clear() self.corresponding_array_descrs.clear() - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.emit_postponed_op() def emit_postponed_op(self): @@ -326,7 +326,7 @@ return if op.is_guard(): self.optimizer.pendingfields = ( - self.force_lazy_setfields_and_arrayitems_for_guard()) + self.force_lazy_sets_for_guard()) return opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or # handled specially @@ -354,7 +354,7 @@ if not effectinfo.has_random_effects(): self.force_from_effectinfo(effectinfo) return - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.clean_caches() def optimize_CALL_I(self, op): @@ -432,7 +432,7 @@ # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) + self.force_lazy_set(fielddescr) for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: @@ -442,7 +442,7 @@ del self.cached_dict_reads[fielddescr] except KeyError: pass - self.force_lazy_setfield(fielddescr, can_cache=False) + self.force_lazy_set(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) if arraydescr in self.corresponding_array_descrs: @@ -453,16 +453,16 @@ pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) + self.force_lazy_set(vrefinfo.descr_forced) # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. - def force_lazy_setfield(self, descr, can_cache=True): + def force_lazy_set(self, descr, can_cache=True): try: cf = self.cached_fields[descr] except KeyError: return - cf.force_lazy_setfield(self, descr, can_cache) + cf.force_lazy_set(self, descr, can_cache) def force_lazy_setarrayitem(self, arraydescr, indexb=None, can_cache=True): try: @@ -471,35 +471,35 @@ return for idx, cf in submap.iteritems(): if indexb is None or indexb.contains(idx): - cf.force_lazy_setfield(self, None, can_cache) + cf.force_lazy_set(self, None, can_cache) - def force_all_lazy_setfields_and_arrayitems(self): + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for submap in self.cached_arrayitems.itervalues(): for index, cf in submap.iteritems(): - cf.force_lazy_setfield(self, None) + cf.force_lazy_set(self, None) - def force_lazy_setfields_and_arrayitems_for_guard(self): + def force_lazy_sets_for_guard(self): pendingfields = [] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue val = op.getarg(1) if self.optimizer.is_virtual(val): pendingfields.append(op) continue - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for descr, submap in self.cached_arrayitems.iteritems(): for index, cf in submap.iteritems(): - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue # the only really interesting case that we need to handle in the @@ -511,7 +511,7 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -528,6 +528,7 @@ if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): + assert not self.is_virtual() self._items = self._items + [None] * (index - len(self._items) + 1) self._items[index] = op if cf is not None: From pypy.commits at gmail.com Wed Jan 13 15:46:45 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 13 Jan 2016 12:46:45 -0800 (PST) Subject: [pypy-commit] pypy globals-quasiimmut: store the first w_globals that a code object is run in on the code object. if a Message-ID: <5696b7b5.482e1c0a.3d702.ffff8d4b@mx.google.com> Author: Carl Friedrich Bolz Branch: globals-quasiimmut Changeset: r81747:a2af053cbe15 Date: 2016-01-13 21:31 +0100 http://bitbucket.org/pypy/pypy/changeset/a2af053cbe15/ Log: store the first w_globals that a code object is run in on the code object. if a frame runs within that code object (which is almost always), it does not need to store it. diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -9,8 +9,8 @@ class Code(W_Root): """A code is a compiled version of some source code. Abstract base class.""" - _immutable_ = True hidden_applevel = False + _immutable_fields_ = ['co_name', 'fast_natural_arity', 'hidden_applevel'] # n >= 0 : arity # FLATPYCALL = 0x100 diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -56,11 +56,13 @@ class PyCode(eval.Code): "CPython-style code objects." - _immutable_ = True - _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", - "_args_as_cellvars[*]"] - + _immutable_fields_ = ["_signature", "co_argcount", "co_cellvars[*]", + "co_code", "co_consts_w[*]", "co_filename", + "co_firstlineno", "co_flags", "co_freevars[*]", + "co_lnotab", "co_names_w[*]", "co_nlocals", + "co_stacksize", "co_varnames[*]", + "_args_as_cellvars[*]", "w_globals?"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -84,6 +86,10 @@ self.co_name = name self.co_firstlineno = firstlineno self.co_lnotab = lnotab + # store the first globals object that the code object is run in in + # here. if a frame is run in that globals object, it does not need to + # store it at all + self.w_globals = None self.hidden_applevel = hidden_applevel self.magic = magic self._signature = cpython_code_signature(self) @@ -91,6 +97,14 @@ self._init_ready() self.new_code_hook() + def frame_stores_global(self, w_globals): + if self.w_globals is None: + self.w_globals = w_globals + return False + if self.w_globals is w_globals: + return False + return True + def new_code_hook(self): code_hook = self.space.fromcache(CodeHookCache)._code_hook if code_hook is not None: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -33,9 +33,11 @@ f_lineno = 0 # current lineno for tracing is_being_profiled = False w_locals = None + w_globals = None def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno + self.w_globals = pycode.w_globals class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -67,7 +69,6 @@ escaped = False # see mark_as_escaped() debugdata = None - w_globals = None pycode = None # code object executed by that frame locals_cells_stack_w = None # the list of all locals, cells and the valuestack valuestackdepth = 0 # number of items on valuestack @@ -90,8 +91,9 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.space = space - self.w_globals = w_globals self.pycode = code + if code.frame_stores_global(w_globals): + self.getorcreatedebug().w_globals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize @@ -116,6 +118,15 @@ self.debugdata = FrameDebugData(self.pycode) return self.debugdata + def get_w_globals(self): + debugdata = self.getdebug() + if debugdata is not None: + return debugdata.w_globals + return jit.promote(self.pycode).w_globals + + def set_w_globals(self, w_globals): + self.getorcreatedebug().w_globals = w_globals + def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -201,8 +212,9 @@ if flags & pycode.CO_NEWLOCALS: self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: - assert self.w_globals is not None - self.getorcreatedebug().w_locals = self.w_globals + w_globals = self.get_w_globals() + assert w_globals is not None + self.getorcreatedebug().w_locals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -449,7 +461,7 @@ w_blockstack, w_exc_value, # last_exception w_tb, # - self.w_globals, + self.get_w_globals(), w(self.last_instr), w(self.frame_finished_execution), w(f_lineno), @@ -658,6 +670,14 @@ def fget_getdictscope(self, space): return self.getdictscope() + def fget_w_globals(self, space): + # bit silly, but GetSetProperty passes a space + return self.get_w_globals() + + def fset_w_globals(self, space, w_obj): + # bit silly, but GetSetProperty passes a space + return self.set_w_globals(w_obj) + ### line numbers ### def fget_f_lineno(self, space): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,7 +837,7 @@ w_bases = self.popvalue() w_name = self.popvalue() w_metaclass = find_metaclass(self.space, w_bases, - w_methodsdict, self.w_globals, + w_methodsdict, self.get_w_globals(), self.space.wrap(self.get_builtin())) w_newclass = self.space.call_function(w_metaclass, w_name, w_bases, w_methodsdict) @@ -881,14 +881,14 @@ def STORE_GLOBAL(self, nameindex, next_instr): varname = self.getname_u(nameindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_globals, varname, w_newvalue) + self.space.setitem_str(self.get_w_globals(), varname, w_newvalue) def DELETE_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - self.space.delitem(self.w_globals, w_varname) + self.space.delitem(self.get_w_globals(), w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.getorcreatedebug().w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.get_w_globals(): varname = self.getname_u(nameindex) w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -898,7 +898,7 @@ self.LOAD_GLOBAL(nameindex, next_instr) # fall-back def _load_global(self, varname): - w_value = self.space.finditem_str(self.w_globals, varname) + w_value = self.space.finditem_str(self.get_w_globals(), varname) if w_value is None: # not in the globals, now look in the built-ins w_value = self.get_builtin().getdictvalue(self.space, varname) @@ -1029,7 +1029,7 @@ if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) - w_globals = self.w_globals + w_globals = self.get_w_globals() if w_flag is None: w_obj = space.call_function(w_import, w_modulename, w_globals, w_locals, w_fromlist) @@ -1237,7 +1237,7 @@ w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments) self.pushvalue(self.space.wrap(fn)) @@ -1249,7 +1249,7 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -772,7 +772,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), + f_globals = GetSetProperty(PyFrame.fget_w_globals, PyFrame.fset_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -93,7 +93,7 @@ if space.is_none(w_locals): w_locals = w_globals else: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): diff --git a/pypy/module/__builtin__/interp_inspect.py b/pypy/module/__builtin__/interp_inspect.py --- a/pypy/module/__builtin__/interp_inspect.py +++ b/pypy/module/__builtin__/interp_inspect.py @@ -2,7 +2,7 @@ def globals(space): "Return the dictionary containing the current scope's global variables." ec = space.getexecutioncontext() - return ec.gettopframe_nohidden().w_globals + return ec.gettopframe_nohidden().get_w_globals() def locals(space): """Return a dictionary containing the current scope's local variables. diff --git a/pypy/module/_warnings/interp_warnings.py b/pypy/module/_warnings/interp_warnings.py --- a/pypy/module/_warnings/interp_warnings.py +++ b/pypy/module/_warnings/interp_warnings.py @@ -75,7 +75,7 @@ frame = ec.getnextframe_nohidden(frame) stacklevel -= 1 if frame: - w_globals = frame.w_globals + w_globals = frame.get_w_globals() lineno = frame.get_last_lineno() else: w_globals = space.sys.w_dict diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -30,7 +30,7 @@ currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtins = space.getitem(w_globals, space.wrap('__builtins__')) if not space.isinstance_w(w_builtins, space.w_dict): w_builtins = w_builtins.getdict(space) @@ -54,7 +54,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.w_globals) + return borrow_from(None, caller.get_w_globals()) @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -34,7 +34,7 @@ frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) - py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_globals = make_ref(space, frame.get_w_globals()) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @cpython_api([PyObject], lltype.Void, external=False) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -20,7 +20,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1139,7 +1139,7 @@ space = w_self.space caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_name = space.finditem(w_globals, space.wrap('__name__')) if w_name is not None: w_self.dict_w['__module__'] = w_name diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -63,7 +63,7 @@ for key, w_value in vars.items(): space.setitem(self.w_locals, space.wrap(key), w_value) if isinstance(code, str): - return space.eval(code, self.w_globals, self.w_locals) + return space.eval(code, self.get_w_globals(), self.w_locals) pyc = pycode.PyCode._from_code(space, code) return pyc.exec_host_bytecode(self.w_globals, self.w_locals) exec_ = eval @@ -248,7 +248,7 @@ #if filename.endswith("pyc"): # filename = filename[:-1] try: - space.exec_(str(source), frame.w_globals, w_locals, + space.exec_(str(source), frame.get_w_globals(), w_locals, filename=filename) except OperationError, e: if e.match(space, w_ExpectedException): From pypy.commits at gmail.com Wed Jan 13 15:46:47 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 13 Jan 2016 12:46:47 -0800 (PST) Subject: [pypy-commit] pypy globals-quasiimmut: introduce a special class W_ModuleDictObject which stores the strategy as a Message-ID: <5696b7b7.cb571c0a.75fb7.535a@mx.google.com> Author: Carl Friedrich Bolz Branch: globals-quasiimmut Changeset: r81748:bccc7eeb61f9 Date: 2016-01-13 21:40 +0100 http://bitbucket.org/pypy/pypy/changeset/bccc7eeb61f9/ Log: introduce a special class W_ModuleDictObject which stores the strategy as a quasi-immutable field. that way, global lookups really produce 0 ops, even in the preamble. diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -93,7 +93,7 @@ Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = w_obj.strategy.__class__.__name__ + name = w_obj.get_strategy().__class__.__name__ elif isinstance(w_obj, W_ListObject): name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -153,7 +153,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): d_new[_wrapkey(space, key)] = unwrap_cell(self.space, cell) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -42,6 +42,14 @@ class W_DictMultiObject(W_Root): + """ Abstract base class that does not store a strategy. """ + def get_strategy(self): + raise NotImplementedError("abstract method") + + def set_strategy(self, strategy): + raise NotImplementedError("abstract method") + + @staticmethod def allocate_and_init_instance(space, w_type=None, module=False, instance=False, strdict=False, @@ -52,6 +60,10 @@ # every module needs its own strategy, because the strategy stores # the version tag strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) + W_ModuleDictObject.__init__(w_obj, space, strategy, storage) + return w_obj elif space.config.objspace.std.withmapdict and instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) @@ -68,18 +80,17 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_obj = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_obj, space, strategy, storage) + w_obj = space.allocate_instance(W_DictObject, w_type) + W_DictObject.__init__(w_obj, space, strategy, storage) return w_obj - def __init__(self, space, strategy, storage): + def __init__(self, space, storage): self.space = space - self.strategy = strategy self.dstorage = storage def __repr__(self): """representation for debugging purposes""" - return "%s(%s)" % (self.__class__.__name__, self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.get_strategy()) def unwrap(w_dict, space): result = {} @@ -101,7 +112,7 @@ self.setitem(w_k, w_v) def setitem_str(self, key, w_value): - self.strategy.setitem_str(self, key, w_value) + self.get_strategy().setitem_str(self, key, w_value) @staticmethod def descr_new(space, w_dicttype, __args__): @@ -261,8 +272,9 @@ def nondescr_reversed_dict(self, space): """Not exposed directly to app-level, but via __pypy__.reversed_dict(). """ - if self.strategy.has_iterreversed: - it = self.strategy.iterreversed(self) + strategy = self.get_strategy() + if strategy.has_iterreversed: + it = strategy.iterreversed(self) return W_DictMultiIterKeysObject(space, it) else: # fall-back @@ -337,6 +349,37 @@ init_or_update(space, self, __args__, 'dict.update') +class W_DictObject(W_DictMultiObject): + """ a regular dict object """ + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.dstrategy = strategy + + def get_strategy(self): + return self.dstrategy + + def set_strategy(self, strategy): + self.dstrategy = strategy + + +class W_ModuleDictObject(W_DictMultiObject): + """ a dict object for a module, that is not expected to change. It stores + the strategy as a quasi-immutable field. """ + _immutable_fields_ = ['mstrategy?'] + + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.mstrategy = strategy + + def get_strategy(self): + return self.mstrategy + + def set_strategy(self, strategy): + self.mstrategy = strategy + + + + def _add_indirections(): dict_methods = "getitem getitem_str setitem setdefault \ popitem delitem clear \ @@ -347,7 +390,7 @@ def make_method(method): def f(self, *args): - return getattr(self.strategy, method)(self, *args) + return getattr(self.get_strategy(), method)(self, *args) f.func_name = method return f @@ -490,7 +533,7 @@ def clear(self, w_dict): strategy = self.space.fromcache(EmptyDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def listview_bytes(self, w_dict): @@ -556,32 +599,32 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(UnicodeDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_int_strategy(self, w_dict): strategy = self.space.fromcache(IntDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_identity_strategy(self, w_dict): from pypy.objspace.std.identitydict import IdentityDictStrategy strategy = self.space.fromcache(IdentityDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_object_strategy(self, w_dict): strategy = self.space.fromcache(ObjectDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def getitem(self, w_dict, w_key): @@ -662,7 +705,7 @@ if self.pos < self.len: result = getattr(self, 'next_' + TP + '_entry')() self.pos += 1 - if self.strategy is self.dictimplementation.strategy: + if self.strategy is self.dictimplementation.get_strategy(): return result # common case else: # waaa, obscure case: the strategy changed, but not the @@ -804,7 +847,7 @@ else: return # w_dict is completely empty, nothing to do count = w_dict.length() - 1 - w_updatedict.strategy.prepare_update(w_updatedict, count) + w_updatedict.get_strategy().prepare_update(w_updatedict, count) # If the strategy is still different, continue the slow way if not same_strategy(self, w_updatedict): for key, value, keyhash in iteritemsh: @@ -825,7 +868,7 @@ def same_strategy(self, w_otherdict): return (setitem_untyped is not None and - w_otherdict.strategy is self) + w_otherdict.get_strategy() is self) dictimpl.iterkeys = iterkeys dictimpl.itervalues = itervalues @@ -934,7 +977,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, value in d.iteritems(): d_new[self.wrap(key)] = value - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) # --------------- iterator interface ----------------- @@ -1178,7 +1221,7 @@ def update1_dict_dict(space, w_dict, w_data): - w_data.strategy.rev_update1_dict_dict(w_data, w_dict) + w_data.get_strategy().rev_update1_dict_dict(w_data, w_dict) def update1_pairs(space, w_dict, data_w): diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -18,7 +18,7 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage @@ -142,7 +142,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for i in range(len(keys)): d_new[self.wrap(keys[i])] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def switch_to_bytes_strategy(self, w_dict): @@ -152,7 +152,7 @@ d_new = strategy.unerase(storage) for i in range(len(keys)): d_new[keys[i]] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def view_as_kwargs(self, w_dict): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -6,7 +6,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.dictmultiobject import ( W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, - BaseValueIterator, BaseItemIterator, _never_equal_to_string + BaseValueIterator, BaseItemIterator, _never_equal_to_string, + W_DictObject, ) from pypy.objspace.std.typeobject import MutableCell @@ -407,7 +408,7 @@ strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) - w_dict = W_DictMultiObject(space, strategy, storage) + w_dict = W_DictObject(space, strategy, storage) flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag return w_dict @@ -422,8 +423,8 @@ # new dict. If the old dict was using the MapDictStrategy, we # have to force it now: otherwise it would remain an empty # shell that continues to delegate to 'self'. - if type(w_olddict.strategy) is MapDictStrategy: - w_olddict.strategy.switch_to_object_strategy(w_olddict) + if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag @@ -641,7 +642,7 @@ w_obj = self.unerase(w_dict.dstorage) strategy = self.space.fromcache(ObjectDictStrategy) dict_w = strategy.unerase(strategy.get_empty_storage()) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(dict_w) assert w_obj.getdict(self.space) is w_dict or w_obj._get_mapdict_map().terminator.w_cls is None materialize_r_dict(self.space, w_obj, dict_w) @@ -750,7 +751,7 @@ def next_key_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: @@ -772,7 +773,7 @@ def next_value_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: @@ -793,7 +794,7 @@ def next_item_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None, None if self.curr_map: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -18,7 +18,7 @@ from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject, wrapstr from pypy.objspace.std.complexobject import W_ComplexObject -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictMultiObject, W_DictObject from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject, setup_prebuilt, wrapint from pypy.objspace.std.iterobject import W_AbstractSeqIterObject, W_SeqIterObject @@ -439,7 +439,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_bytes() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_bytes() @@ -454,7 +454,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_unicode() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() @@ -467,7 +467,7 @@ def listview_int(self, w_obj): if type(w_obj) is W_ListObject: return w_obj.getitems_int() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_int() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() @@ -485,7 +485,7 @@ return None def view_as_kwargs(self, w_dict): - if type(w_dict) is W_DictMultiObject: + if type(w_dict) is W_DictObject: return w_dict.view_as_kwargs() return (None, None) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,7 +1,7 @@ import py from pypy.objspace.std.celldict import ModuleDictStrategy -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictObject, W_ModuleDictObject from pypy.objspace.std.test.test_dictmultiobject import ( BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, FakeString) @@ -14,7 +14,7 @@ def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version key = "a" @@ -23,30 +23,30 @@ v2 = strategy.version assert v1 is not v2 assert d.getitem(w_key) == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) == 1 d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 assert d.getitem(w_key) == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 2 d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 assert d.getitem(w_key) == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 3 d.delitem(w_key) v5 = strategy.version assert v5 is not v4 assert d.getitem(w_key) is None - assert d.strategy.getdictvalue_no_unwrapping(d, key) is None + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version x = object() @@ -134,7 +134,7 @@ py.test.skip("__repr__ doesn't work on appdirect") strategy = ModuleDictStrategy(cls.space) storage = strategy.get_empty_storage() - cls.w_d = W_DictMultiObject(cls.space, strategy, storage) + cls.w_d = W_ModuleDictObject(cls.space, strategy, storage) def test_popitem(self): import __pypy__ diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -2,14 +2,14 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - BytesDictStrategy, ObjectDictStrategy) + W_DictObject, BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): def test_empty(self): d = self.space.newdict() assert not self.space.is_true(d) - assert type(d.strategy) is not ObjectDictStrategy + assert type(d.get_strategy()) is not ObjectDictStrategy def test_nonempty(self): space = self.space @@ -1050,7 +1050,7 @@ return l def newlist_bytes(self, l): return l - DictObjectCls = W_DictMultiObject + DictObjectCls = W_DictObject def type(self, w_obj): if isinstance(w_obj, FakeString): return str @@ -1076,7 +1076,7 @@ return tuple(l) def newdict(self, module=False, instance=False): - return W_DictMultiObject.allocate_and_init_instance( + return W_DictObject.allocate_and_init_instance( self, module=module, instance=instance) def view_as_kwargs(self, w_d): @@ -1105,7 +1105,7 @@ w_float = float StringObjectCls = FakeString UnicodeObjectCls = FakeUnicode - w_dict = W_DictMultiObject + w_dict = W_DictObject iter = iter fixedview = list listview = list @@ -1149,8 +1149,8 @@ def get_impl(self): strategy = self.StrategyClass(self.fakespace) storage = strategy.get_empty_storage() - w_dict = self.fakespace.allocate_instance(W_DictMultiObject, None) - W_DictMultiObject.__init__(w_dict, self.fakespace, strategy, storage) + w_dict = self.fakespace.allocate_instance(W_DictObject, None) + W_DictObject.__init__(w_dict, self.fakespace, strategy, storage) return w_dict def fill_impl(self): @@ -1159,7 +1159,7 @@ def check_not_devolved(self): #XXX check if strategy changed!? - assert type(self.impl.strategy) is self.StrategyClass + assert type(self.impl.get_strategy()) is self.StrategyClass #assert self.impl.r_dict_content is None def test_popitem(self): @@ -1246,7 +1246,7 @@ for x in xrange(100): impl.setitem(self.fakespace.str_w(str(x)), x) impl.setitem(x, x) - assert type(impl.strategy) is ObjectDictStrategy + assert type(impl.get_strategy()) is ObjectDictStrategy def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names @@ -1308,7 +1308,7 @@ class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): BaseTestRDictImplementation.fill_impl(self) - self.impl.strategy.switch_to_object_strategy(self.impl) + self.impl.get_strategy().switch_to_object_strategy(self.impl) def check_not_devolved(self): pass @@ -1320,5 +1320,5 @@ def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is BytesDictStrategy + assert type(d.get_strategy()) is BytesDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -1,5 +1,5 @@ import py -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.kwargsdict import * space = FakeSpace() @@ -9,7 +9,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -23,7 +23,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -52,7 +52,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -69,11 +69,11 @@ def test_limit_size(): storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) for i in range(100): assert d.setitem_str("d%s" % i, 4) is None - assert d.strategy is not strategy - assert "BytesDictStrategy" == d.strategy.__class__.__name__ + assert d.get_strategy() is not strategy + assert "BytesDictStrategy" == d.get_strategy().__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() @@ -82,7 +82,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) w_l = d.w_keys() # does not crash def test_view_as_kwargs(): @@ -91,26 +91,27 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert (space.view_as_kwargs(d) == keys, values) strategy = EmptyDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert (space.view_as_kwargs(d) == [], []) def test_from_empty_to_kwargs(): strategy = EmptyKwargsDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) d.setitem_str("a", 3) - assert isinstance(d.strategy, KwargsDictStrategy) + assert isinstance(d.get_strategy(), KwargsDictStrategy) from pypy.objspace.std.test.test_dictmultiobject import BaseTestRDictImplementation, BaseTestDevolvedDictImplementation def get_impl(self): storage = strategy.erase(([], [])) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) + class TestKwargsDictImplementation(BaseTestRDictImplementation): StrategyClass = KwargsDictStrategy get_impl = get_impl diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.mapdict import * class Config: @@ -309,7 +309,7 @@ obj.setdictvalue(space, "c", 7) assert obj.storage == [50, 60, 70, 5, 6, 7] - class FakeDict(W_DictMultiObject): + class FakeDict(W_DictObject): def __init__(self, d): self.dstorage = d @@ -368,7 +368,7 @@ def devolve_dict(space, obj): w_d = obj.getdict(space) - w_d.strategy.switch_to_object_strategy(w_d) + w_d.get_strategy().switch_to_object_strategy(w_d) def test_get_setdictvalue_after_devolve(): cls = Class() @@ -1127,7 +1127,7 @@ def test_newdict_instance(): w_dict = space.newdict(instance=True) - assert type(w_dict.strategy) is MapDictStrategy + assert type(w_dict.get_strategy()) is MapDictStrategy class TestMapDictImplementationUsingnewdict(BaseTestRDictImplementation): StrategyClass = MapDictStrategy diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -478,12 +478,12 @@ def getdict(w_self, space): # returning a dict-proxy! from pypy.objspace.std.dictproxyobject import DictProxyStrategy - from pypy.objspace.std.dictmultiobject import W_DictMultiObject + from pypy.objspace.std.dictmultiobject import W_DictObject if w_self.lazyloaders: w_self._cleanup_() # force un-lazification strategy = space.fromcache(DictProxyStrategy) storage = strategy.erase(w_self) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) def is_heaptype(w_self): return w_self.flag_heaptype From pypy.commits at gmail.com Wed Jan 13 15:53:45 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 13 Jan 2016 12:53:45 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: minor stuff Message-ID: <5696b959.e251c20a.edf1d.32c6@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2578:49c462981569 Date: 2016-01-13 21:53 +0100 http://bitbucket.org/cffi/cffi/changeset/49c462981569/ Log: minor stuff diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -94,7 +94,7 @@ ("frozen") inside the DLL. At runtime, the code is executed when the DLL is first initialized, just after Python itself is initialized. This newly initialized Python interpreter has got an - extra "built-in" module that will be loaded magically without + extra "built-in" module that can be loaded magically without accessing any files, with a line like "``from my_plugin import ffi, lib``". The name ``my_plugin`` comes from the first argument to ``ffi.set_source()``. This module represents "the caller's C world" @@ -264,9 +264,9 @@ return x * 10 This ``@ffi.def_extern`` is attaching a Python function to the C -callback ``mycb``, which in this case is not exported from the DLL. -Nevertheless, the automatic initialization of Python occurs at this -time, if it happens that ``mycb()`` is the first function called +callback ``mycb()``, which in this case is not exported from the DLL. +Nevertheless, the automatic initialization of Python occurs when +``mycb()`` is called, if it happens to be the first function called from C. More precisely, it does not happen when ``myfunc()`` is called: this is just a C function, with no extra code magically inserted around it. It only happens when ``myfunc()`` calls From pypy.commits at gmail.com Wed Jan 13 16:07:22 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 13 Jan 2016 13:07:22 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: seems like i got rid of this nasty SIGFPE (fix point overflow), the register was dirty which could have lead to accidentally setting the FPE bit in PSW Message-ID: <5696bc8a.4c0c1c0a.25017.5a60@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81749:6d185183c341 Date: 2016-01-13 22:01 +0100 http://bitbucket.org/pypy/pypy/changeset/6d185183c341/ Log: seems like i got rid of this nasty SIGFPE (fix point overflow), the register was dirty which could have lead to accidentally setting the FPE bit in PSW copied the stacklet switch header file, saved registers but need to convert the rest as well diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -82,6 +82,7 @@ jmp_xor_lr_overflow = mc.get_relative_pos() mc.reserve_cond_jump() # CLGIJ lr > 0 -> (label_overflow) mc.LCGR(lq, lq) # complement the value + mc.XGR(r.SCRATCH, r.SCRATCH) mc.SPM(r.SCRATCH) # 0x80 ... 00 clears the condition code and program mask jmp_no_overflow_xor_neg = mc.get_relative_pos() mc.reserve_cond_jump(short=True) @@ -102,8 +103,8 @@ # set overflow! label_overflow = mc.get_relative_pos() + # set bit 34 & 35 -> indicates overflow mc.XGR(r.SCRATCH, r.SCRATCH) - # set bit 34 & 35 -> indicates overflow mc.OILH(r.SCRATCH, l.imm(0x3000)) # sets OF mc.SPM(r.SCRATCH) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -177,6 +177,16 @@ self.a.jmpto(r.r14) assert run_asm(self.a) == -1 + def test_or_bitpos_0to15(self): + self.a.mc.XGR(r.r2, r.r2) + self.a.mc.OIHH(r.r2, loc.imm(0x0000)) + self.a.mc.OIHL(r.r2, loc.imm(0x0000)) + self.a.mc.OILL(r.r2, loc.imm(0x0000)) + self.a.mc.OILH(r.r2, loc.imm(0x300c)) + self.a.jmpto(r.r14) + res = run_asm(self.a) + assert res == 0x00000000300c0000 + def test_uint_rshift(self): self.a.mc.XGR(r.r4, r.r4) self.a.mc.LGFI(r.r5, loc.imm(63)) @@ -187,6 +197,16 @@ self.a.jmpto(r.r14) assert run_asm(self.a) == 0 + def test_ag_overflow(self): + self.a.mc.BRC(con.ANY, loc.imm(4+8+8)) + self.a.mc.write('\x7f' + '\xff' * 7) + self.a.mc.write('\x7f' + '\xff' * 7) + self.a.mc.LARL(r.r5, loc.imm(-8)) + self.a.mc.LG(r.r4, loc.addr(8,r.r5)) + self.a.mc.AG(r.r4, loc.addr(0,r.r5)) + self.a.jmpto(r.r14) + assert run_asm(self.a) == 0 + def test_xor(self): self.a.mc.XGR(r.r2, r.r2) self.a.jmpto(r.r14) diff --git a/rpython/translator/c/src/stacklet/slp_platformselect.h b/rpython/translator/c/src/stacklet/slp_platformselect.h --- a/rpython/translator/c/src/stacklet/slp_platformselect.h +++ b/rpython/translator/c/src/stacklet/slp_platformselect.h @@ -14,6 +14,8 @@ #include "switch_ppc64_gcc.h" /* gcc on ppc64 */ #elif defined(__GNUC__) && defined(__mips__) && defined(_ABI64) #include "switch_mips64_gcc.h" /* gcc on mips64 */ +#elif defined(__GNUC__) && defined(__s390x__) && defined(_ABI64) +#include "switch_s390x_gcc.h" #else #error "Unsupported platform!" #endif diff --git a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h new file mode 100644 --- /dev/null +++ b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h @@ -0,0 +1,114 @@ +#if !(defined(__LITTLE_ENDIAN__) ^ defined(__BIG_ENDIAN__)) +# error "cannot determine if it is ppc64 or ppc64le" +#endif + +#ifdef __BIG_ENDIAN__ +# define TOC_AREA "40" +#else +# define TOC_AREA "24" +#endif + + +/* This depends on these attributes so that gcc generates a function + with no code before the asm, and only "blr" after. */ +static __attribute__((noinline, optimize("O2"))) +void *slp_switch(void *(*save_state)(void*, void*), + void *(*restore_state)(void*, void*), + void *extra) +{ + void *result; + __asm__ volatile ( + /* By Vaibhav Sood & Armin Rigo, with some copying from + the Stackless version by Kristjan Valur Jonsson */ + + /* Save all 18 volatile GP registers, 18 volatile FP regs, and 12 + volatile vector regs. We need a stack frame of 144 bytes for FPR, + 144 bytes for GPR, 192 bytes for VR plus 48 bytes for the standard + stackframe = 528 bytes (a multiple of 16). */ + + //"mflr 0\n" /* Save LR into 16(r1) */ + //"stg 0, 16(1)\n" + + "stmg 6,15,48(15)\n" + + "std 0,128(15)\n" + "std 2,136(15)\n" + "std 4,144(15)\n" + "std 6,152(15)\n" + + "lay 15,-160(15)\n" /* Create stack frame */ + + "lgr 10, %[restore_state]\n" /* save 'restore_state' for later */ + "lgr 11, %[extra]\n" /* save 'extra' for later */ + "lgr 14, %[save_state]\n" /* move 'save_state' into r14 for branching */ + "mr 2, 15\n" /* arg 1: current (old) stack pointer */ + "mr 3, 11\n" /* arg 2: extra */ + + "stdu 1, -48(1)\n" /* create temp stack space (see below) */ +#ifdef __BIG_ENDIAN__ + "ld 0, 0(12)\n" + "ld 11, 16(12)\n" + "mtctr 0\n" + "ld 2, 8(12)\n" +#else + "mtctr 12\n" /* r12 is fixed by this ABI */ +#endif + "bctrl\n" /* call save_state() */ + "addi 1, 1, 48\n" /* destroy temp stack space */ + + "CGIJ 2, 0, 7, zero\n" /* skip the rest if the return value is null */ + + "lgr 15, 2\n" /* change the stack pointer */ + /* From now on, the stack pointer is modified, but the content of the + stack is not restored yet. It contains only garbage here. */ + + "mr 4, 15\n" /* arg 2: extra */ + /* arg 1: current (new) stack pointer + is already in r3 */ + + "stdu 1, -48(1)\n" /* create temp stack space for callee to use */ + /* ^^^ we have to be careful. The function call will store the link + register in the current frame (as the ABI) dictates. But it will + then trample it with the restore! We fix this by creating a fake + stack frame */ + +#ifdef __BIG_ENDIAN__ + "ld 0, 0(14)\n" /* 'restore_state' is in r14 */ + "ld 11, 16(14)\n" + "mtctr 0\n" + "ld 2, 8(14)\n" +#endif +#ifdef __LITTLE_ENDIAN__ + "mr 12, 14\n" /* copy 'restore_state' */ + "mtctr 12\n" /* r12 is fixed by this ABI */ +#endif + + "bctrl\n" /* call restore_state() */ + "addi 1, 1, 48\n" /* destroy temp stack space */ + + /* The stack's content is now restored. */ + + "zero:\n" + + /* Epilogue */ + + // "mtcrf 0xff, 12\n" + + // "addi 1,1,528\n" + + "lay 15,160(15)\n" /* restore stack pointer */ + + "ld 0,128(15)\n" + "ld 2,136(15)\n" + "ld 4,144(15)\n" + "ld 6,152(15)\n" + + "lmg 6,15,48(15)\n" + + : "=r"(result) /* output variable: expected to be r2 */ + : [restore_state]"r"(restore_state), /* input variables */ + [save_state]"r"(save_state), + [extra]"r"(extra) + ); + return result; +} From pypy.commits at gmail.com Wed Jan 13 18:19:23 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 13 Jan 2016 15:19:23 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: I'm coming Message-ID: <5696db7b.ca061c0a.f98cf.78a1@mx.google.com> Author: Ronan Lamy Branch: extradoc Changeset: r5593:200bc0012762 Date: 2016-01-13 23:18 +0000 http://bitbucket.org/pypy/extradoc/changeset/200bc0012762/ Log: I'm coming diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -16,6 +16,7 @@ Matti Picus 20-25 Ermina Manuel Jacob 20-28 Ermina Richard Plangger 20-28 Ermina +Ronan Lamy 20-27 Ermina? ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room From pypy.commits at gmail.com Wed Jan 13 18:25:41 2016 From: pypy.commits at gmail.com (sbauman) Date: Wed, 13 Jan 2016 15:25:41 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Remove getfield_gc_pure_* instructions from tests (purity information is now in the descriptor) Message-ID: <5696dcf5.82e11c0a.48504.1477@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81752:5e436297bd5a Date: 2016-01-13 18:25 -0500 http://bitbucket.org/pypy/pypy/changeset/5e436297bd5a/ Log: Remove getfield_gc_pure_* instructions from tests (purity information is now in the descriptor) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -85,9 +85,9 @@ p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() - p41 = getfield_gc_pure_r(p38, descr=) + p41 = getfield_gc_r(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) - i42 = getfield_gc_pure_i(p38, descr=) + i42 = getfield_gc_i(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -435,7 +435,7 @@ guard_isnull(p5, descr=...) guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) - i22 = getfield_gc_pure_i(p12, descr=) + i22 = getfield_gc_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) guard_value(p7, ConstPtr(ptr25), descr=...) @@ -445,14 +445,14 @@ p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() - p32 = getfield_gc_pure_r(p29, descr=) + p32 = getfield_gc_r(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc_pure_i(p29, descr=) + i34 = getfield_gc_i(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) p37 = getfield_gc_r(ConstPtr(ptr36), descr=) guard_nonnull_class(p37, ConstClass(W_IntObject), descr=...) - i39 = getfield_gc_pure_i(p37, descr=) + i39 = getfield_gc_i(p37, descr=) i40 = int_add_ovf(i22, i39) guard_no_overflow(descr=...) --TICK-- @@ -469,7 +469,7 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure_i(p6, descr=) + i8 = getfield_gc_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -84,7 +84,7 @@ guard_no_exception(descr=...) p20 = new_with_vtable(descr=...) call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) @@ -93,7 +93,7 @@ p28 = getfield_gc_r(p13, descr=) p29 = getinteriorfield_gc_r(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure_i(p29, descr=) + i31 = getfield_gc_i(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -101,13 +101,13 @@ loop = log._filter(log.loops[0]) assert loop.match(""" guard_class(p1, #, descr=...) - p4 = getfield_gc_pure_r(p1, descr=) + p4 = getfield_gc_r(p1, descr=) i5 = getfield_gc_i(p0, descr=) - p6 = getfield_gc_pure_r(p4, descr=) - p7 = getfield_gc_pure_r(p6, descr=) + p6 = getfield_gc_r(p4, descr=) + p7 = getfield_gc_r(p6, descr=) guard_class(p7, ConstClass(Float64), descr=...) - i9 = getfield_gc_pure_i(p4, descr=) - i10 = getfield_gc_pure_i(p6, descr=) + i9 = getfield_gc_i(p4, descr=) + i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) i14 = int_eq(i10, 60) i15 = int_or(i12, i14) @@ -117,28 +117,28 @@ i18 = float_ne(f16, 0.000000) guard_true(i18, descr=...) guard_nonnull_class(p2, ConstClass(W_BoolBox), descr=...) - i20 = getfield_gc_pure_i(p2, descr=) + i20 = getfield_gc_i(p2, descr=) i21 = int_is_true(i20) guard_false(i21, descr=...) i22 = getfield_gc_i(p0, descr=) - i23 = getfield_gc_pure_i(p1, descr=) + i23 = getfield_gc_i(p1, descr=) guard_true(i23, descr=...) i25 = int_add(i22, 1) - p26 = getfield_gc_pure_r(p0, descr=) - i27 = getfield_gc_pure_i(p1, descr=) + p26 = getfield_gc_r(p0, descr=) + i27 = getfield_gc_i(p1, descr=) i28 = int_is_true(i27) guard_true(i28, descr=...) - i29 = getfield_gc_pure_i(p6, descr=) + i29 = getfield_gc_i(p6, descr=) guard_value(i29, 8, descr=...) i30 = int_add(i5, 8) - i31 = getfield_gc_pure_i(p1, descr=) + i31 = getfield_gc_i(p1, descr=) i32 = int_ge(i25, i31) guard_false(i32, descr=...) p34 = new_with_vtable(descr=...) {{{ - setfield_gc(p34, p1, descr=) + setfield_gc(p34, p1, descr=) setfield_gc(p34, i25, descr=) - setfield_gc(p34, p26, descr=) + setfield_gc(p34, p26, descr=) setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -54,7 +54,7 @@ i19 = int_add(i11, 1) setfield_gc(p2, i19, descr=...) guard_nonnull_class(p18, ConstClass(W_IntObject), descr=...) - i20 = getfield_gc_pure_i(p18, descr=...) + i20 = getfield_gc_i(p18, descr=...) i21 = int_gt(i20, i14) guard_true(i21, descr=...) jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -113,7 +113,7 @@ i12 = int_is_true(i4) guard_true(i12, descr=...) guard_not_invalidated(descr=...) - i10p = getfield_gc_pure_i(p10, descr=...) + i10p = getfield_gc_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -82,7 +82,7 @@ strsetitem(p25, 0, i23) p93 = call_r(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) - i95 = getfield_gc_pure_i(p93, descr=) + i95 = getfield_gc_i(p93, descr=) i96 = int_gt(i95, #) guard_false(i96, descr=...) i94 = call_i(ConstClass(rbigint._toint_helper), p93, descr=) From pypy.commits at gmail.com Wed Jan 13 18:25:37 2016 From: pypy.commits at gmail.com (sbauman) Date: Wed, 13 Jan 2016 15:25:37 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Check no longer needed Message-ID: <5696dcf1.62f3c20a.32396.564a@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81750:23d46522a1dc Date: 2016-01-13 13:48 -0500 http://bitbucket.org/pypy/pypy/changeset/23d46522a1dc/ Log: Check no longer needed diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -221,7 +221,7 @@ def produce_potential_short_preamble_ops(self, sb): ops = self.optimizer._newoperations for i, op in enumerate(ops): - if op.is_always_pure() and not op.is_getfield(): + if op.is_always_pure(): sb.add_pure_op(op) if op.is_ovf() and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: sb.add_pure_op(op) From pypy.commits at gmail.com Wed Jan 13 18:25:39 2016 From: pypy.commits at gmail.com (sbauman) Date: Wed, 13 Jan 2016 15:25:39 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Don't invalidate field caches for immutable fields Message-ID: <5696dcf3.8e371c0a.bcd8f.ffffaf9a@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81751:1ddab6bd3654 Date: 2016-01-13 18:23 -0500 http://bitbucket.org/pypy/pypy/changeset/1ddab6bd3654/ Log: Don't invalidate field caches for immutable fields diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -183,6 +183,8 @@ return res def invalidate(self, descr): + if descr.is_always_pure(): + return for opinfo in self.cached_infos: assert isinstance(opinfo, info.AbstractStructPtrInfo) opinfo._fields[descr.get_index()] = None From pypy.commits at gmail.com Thu Jan 14 03:40:58 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 14 Jan 2016 00:40:58 -0800 (PST) Subject: [pypy-commit] pypy globals-quasiimmut: rejigger test_pypy_c tests Message-ID: <56975f1a.2851c20a.d69fa.ffffd0fd@mx.google.com> Author: Carl Friedrich Bolz Branch: globals-quasiimmut Changeset: r81753:4588aac79c87 Date: 2016-01-14 09:39 +0100 http://bitbucket.org/pypy/pypy/changeset/4588aac79c87/ Log: rejigger test_pypy_c tests diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -526,7 +526,7 @@ log = self.run(f) loop, = log.loops_by_filename(self.filepath) call_ops = log.opnames(loop.ops_by_id('call')) - assert call_ops == ['force_token'] # it does not follow inlining + assert call_ops == ['guard_not_invalidated', 'force_token'] # it does not follow inlining # add_ops = log.opnames(loop.ops_by_id('add')) assert add_ops == ['int_add'] @@ -534,9 +534,10 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'int_lt', 'guard_true', + 'guard_not_invalidated', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,8 +72,6 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] @@ -200,6 +198,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ + guard_not_invalidated? i14 = force_token() i16 = force_token() """) @@ -222,7 +221,7 @@ loop, = log.loops_by_id('call') ops = log.opnames(loop.ops_by_id('call')) guards = [ops for ops in ops if ops.startswith('guard')] - assert guards == ["guard_no_overflow"] + assert guards == ["guard_not_invalidated", "guard_no_overflow"] def test_kwargs(self): # this is not a very precise test, could be improved @@ -281,6 +280,7 @@ assert log.result == 13000 loop0, = log.loops_by_id('g1') assert loop0.match_by_id('g1', """ + guard_not_invalidated? i20 = force_token() i22 = int_add_ovf(i8, 3) guard_no_overflow(descr=...) @@ -438,9 +438,6 @@ i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) - guard_value(p7, ConstPtr(ptr25), descr=...) - p26 = getfield_gc_r(p7, descr=) - guard_value(p26, ConstPtr(ptr27), descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) @@ -472,6 +469,7 @@ i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,9 +16,5 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p12 = getfield_gc_r(p10, descr=) - guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc_r(ConstPtr(p17), descr=) - guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -124,7 +124,7 @@ setfield_gc(ConstPtr(ptr39), i59, descr=...) i62 = int_lt(i61, 0) guard_false(i62, descr=...) - jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, p100, descr=...) + jump(..., descr=...) """) def test_mutate_class(self): @@ -183,7 +183,7 @@ setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(ptr42), descr=...) setfield_gc(ConstPtr(ptr69), p77, descr=...) - jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, p100, descr=...) + jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -145,9 +145,9 @@ i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) - i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) + i18 = force_token() i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) @@ -178,9 +178,9 @@ i16 = int_ge(i11, i12) guard_false(i16, descr=...) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) @@ -211,9 +211,9 @@ i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i95 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -23,12 +23,8 @@ i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) - p62 = getfield_gc_r(ConstPtr(ptr37), descr=) setfield_gc(p18, i61, descr=) - guard_value(p62, ConstPtr(ptr39), descr=...) guard_not_invalidated(descr=...) - p64 = getfield_gc_r(ConstPtr(ptr40), descr=) - guard_value(p64, ConstPtr(ptr42), descr=...) p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc_r(p14, descr=) From pypy.commits at gmail.com Thu Jan 14 04:13:49 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Jan 2016 01:13:49 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Add some sections Message-ID: <569766cd.4e0e1c0a.81dbc.0a2e@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2579:7d0c3632fe95 Date: 2016-01-14 10:13 +0100 http://bitbucket.org/cffi/cffi/changeset/7d0c3632fe95/ Log: Add some sections diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -24,6 +24,12 @@ C functions of your API, which are then used for all subsequent C function calls. +One of the goals of this approach is to be entirely independent from +the CPython C API: no ``Py_Initialize()`` nor ``PyRun_SimpleString()`` +nor even ``PyObject``. It works identically on CPython and PyPy. + +.. note:: PyPy release 4.0.1 contains CFFI 1.4 only. + This is entirely *new in version 1.5.* @@ -163,6 +169,9 @@ * ``@ffi.def_extern()``: see `documentation here,`__ notably on what happens if the Python function raises an exception. +* To create Python objects attached to C data, one common solution is + to use ``ffi.new_handle()``. See documentation here__. + * In embedding mode, the major direction is C code that calls Python functions. This is the opposite of the regular extending mode of CFFI, in which the major direction is Python code calling C. That's @@ -199,11 +208,82 @@ .. __: using.html#working .. __: using.html#def-extern +.. __: using.html#ffi-new_handle .. __: cdef.html#cdef .. _`Using the ffi/lib objects`: using.html +Troubleshooting +--------------- + +The error message + + cffi extension module 'c_module_name' has unknown version 0x2701 + +means that the running Python interpreter located a CFFI version older +than 1.5. CFFI 1.5 or newer must be installed in the running Python. + + +Using multiple CFFI-made DLLs +----------------------------- + +Multiple CFFI-made DLLs can be used by the same process. + +Note that all CFFI-made DLLs in a process share a single Python +interpreter. The effect is the same as the one you get by trying to +build a large Python application by assembling a lot of unrelated +packages. Some of these might be libraries that monkey-patch some +functions from the standard library, for example, which might be +unexpected from other parts. + + +Multithreading +-------------- + +Multithreading should work transparently, based on Python's standard +Global Interpreter Lock. + +If two threads both try to call a C function when Python is not yet +initialized, then locking occurs. One thread proceeds with +initialization and blocks the other thread. The other thread will be +allowed to continue only when the execution of the initialization-time +Python code is done. + +If the two threads call two *different* CFFI-made DLLs, the Python +initialization itself will still be serialized, but the two pieces of +initialization-time Python code will not. The idea is that there is a +priori no reason for one DLL to wait for initialization of the other +DLL to be complete. + +After initialization, Python's standard Global Interpreter Lock kicks +in. The end result is that when one CPU progresses on executing +Python code, no other CPU can progress on executing more Python code +from another thread of the same process. At regular intervals, the +lock switches to a different thread, so that no single thread should +appear to block indefinitely. + + +Testing +------- + +For testing purposes, a CFFI-made DLL can be imported in a running +Python interpreter instead of being loaded like a C shared library. + +You might have some issues with the file name: for example, on +Windows, Python expects the file to be called ``c_module_name.pyd``, +but the CFFI-made DLL is called ``target.dll`` instead. The base name +``target`` is the one specified in ``ffi.compile()``, and on Windows +the extension is ``.dll`` instead of ``.pyd``. You have to rename or +copy the file, or on POSIX use a symlink. + +The module then works like a regular CFFI extension module. It is +imported with ``from c_module_name import ffi, lib`` and exposes on +the ``lib`` object all C functions. You can test it by calling these +C functions. The initialization-time Python code included with the +DLL is executed the first time such a call is done. + + Embedding and Extending ----------------------- diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -423,6 +423,7 @@ with ``int foo();`` really means ``int foo(void);``.) +.. _extern-python: .. _`extern "Python"`: Extern "Python" (new-style callbacks) @@ -1069,12 +1070,13 @@ points in time, and using it in a ``with`` statement. +.. _ffi-new_handle: .. _`ffi.new_handle()`: **ffi.new_handle(python_object)**: return a non-NULL cdata of type ``void *`` that contains an opaque reference to ``python_object``. You can pass it around to C functions or store it into C structures. Later, -you can use **ffi.from_handle(p)** to retrive the original +you can use **ffi.from_handle(p)** to retrieve the original ``python_object`` from a value with the same ``void *`` pointer. *Calling ffi.from_handle(p) is invalid and will likely crash if the cdata object returned by new_handle() is not kept alive!* From pypy.commits at gmail.com Thu Jan 14 04:28:23 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Jan 2016 01:28:23 -0800 (PST) Subject: [pypy-commit] cffi default: Expand the error message Message-ID: <56976a37.84c9c20a.76280.ffffdfac@mx.google.com> Author: Armin Rigo Branch: Changeset: r2580:6e962065ec24 Date: 2016-01-14 10:28 +0100 http://bitbucket.org/cffi/cffi/changeset/6e962065ec24/ Log: Expand the error message diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2,6 +2,8 @@ #include #include "structmember.h" +#define CFFI_VERSION "1.4.2" + #ifdef MS_WIN32 #include #include "misc_win32.h" @@ -6500,7 +6502,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("1.4.2"); + v = PyText_FromString(CFFI_VERSION); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/cffi1_module.c b/c/cffi1_module.c --- a/c/cffi1_module.c +++ b/c/cffi1_module.c @@ -169,8 +169,10 @@ if (version < CFFI_VERSION_MIN || version > CFFI_VERSION_MAX) { if (!PyErr_Occurred()) PyErr_Format(PyExc_ImportError, - "cffi extension module '%s' has unknown version %p", - module_name, (void *)version); + "cffi extension module '%s' uses an unknown version tag %p. " + "This module might need a more recent version of cffi " + "than the one currently installed, which is %s", + module_name, (void *)version, CFFI_VERSION); return NULL; } From pypy.commits at gmail.com Thu Jan 14 04:31:59 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Jan 2016 01:31:59 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: details Message-ID: <56976b0f.460f1c0a.ecff2.1505@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2581:baa217ccf175 Date: 2016-01-14 10:31 +0100 http://bitbucket.org/cffi/cffi/changeset/baa217ccf175/ Log: details diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -278,9 +278,9 @@ copy the file, or on POSIX use a symlink. The module then works like a regular CFFI extension module. It is -imported with ``from c_module_name import ffi, lib`` and exposes on +imported with "``from c_module_name import ffi, lib``" and exposes on the ``lib`` object all C functions. You can test it by calling these -C functions. The initialization-time Python code included with the +C functions. The initialization-time Python code frozen inside the DLL is executed the first time such a call is done. From pypy.commits at gmail.com Thu Jan 14 04:34:52 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Jan 2016 01:34:52 -0800 (PST) Subject: [pypy-commit] pypy default: Expand the error message Message-ID: <56976bbc.6953c20a.5d168.ffffe6c2@mx.google.com> Author: Armin Rigo Branch: Changeset: r81754:e57a3cef398c Date: 2016-01-14 10:33 +0100 http://bitbucket.org/pypy/pypy/changeset/e57a3cef398c/ Log: Expand the error message diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module +from pypy.module import _cffi_backend from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject from pypy.module._cffi_backend.lib_obj import W_LibObject @@ -27,8 +28,10 @@ version = rffi.cast(lltype.Signed, p[0]) if not (VERSION_MIN <= version <= VERSION_MAX): raise oefmt(space.w_ImportError, - "cffi extension module '%s' has unknown version %s", - name, hex(version)) + "cffi extension module '%s' uses an unknown version tag %s. " + "This module might need a more recent version of PyPy. " + "The current PyPy provides CFFI %s.", + name, hex(version), _cffi_backend.VERSION) src_ctx = rffi.cast(parse_c_type.PCTX, p[1]) ffi = W_FFIObject(space, src_ctx) From pypy.commits at gmail.com Thu Jan 14 05:22:01 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 14 Jan 2016 02:22:01 -0800 (PST) Subject: [pypy-commit] pypy globals-quasiimmut: remove confusing class level default Message-ID: <569776c9.cf821c0a.f83f5.5238@mx.google.com> Author: Carl Friedrich Bolz Branch: globals-quasiimmut Changeset: r81755:12c73603f24a Date: 2016-01-14 11:12 +0100 http://bitbucket.org/pypy/pypy/changeset/12c73603f24a/ Log: remove confusing class level default diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -33,7 +33,6 @@ f_lineno = 0 # current lineno for tracing is_being_profiled = False w_locals = None - w_globals = None def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno From pypy.commits at gmail.com Thu Jan 14 05:22:03 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 14 Jan 2016 02:22:03 -0800 (PST) Subject: [pypy-commit] pypy globals-quasiimmut: frame.f_globals is not writable Message-ID: <569776cb.6a69c20a.b6a34.006f@mx.google.com> Author: Carl Friedrich Bolz Branch: globals-quasiimmut Changeset: r81756:6b34788b3303 Date: 2016-01-14 11:18 +0100 http://bitbucket.org/pypy/pypy/changeset/6b34788b3303/ Log: frame.f_globals is not writable diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -123,9 +123,6 @@ return debugdata.w_globals return jit.promote(self.pycode).w_globals - def set_w_globals(self, w_globals): - self.getorcreatedebug().w_globals = w_globals - def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -673,9 +670,6 @@ # bit silly, but GetSetProperty passes a space return self.get_w_globals() - def fset_w_globals(self, space, w_obj): - # bit silly, but GetSetProperty passes a space - return self.set_w_globals(w_obj) ### line numbers ### diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -34,6 +34,7 @@ import sys f = sys._getframe() assert f.f_globals is globals() + raises(TypeError, "f.f_globals = globals()") def test_f_builtins(self): import sys, __builtin__ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -772,7 +772,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = GetSetProperty(PyFrame.fget_w_globals, PyFrame.fset_w_globals), + f_globals = GetSetProperty(PyFrame.fget_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ From pypy.commits at gmail.com Thu Jan 14 06:27:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 14 Jan 2016 03:27:01 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: still searching for this weird problem. seems to be introduced when switching from a guard to a bridge in a very rare case Message-ID: <56978605.6918c20a.a5769.0bd5@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81757:41b2950af11a Date: 2016-01-14 10:29 +0100 http://bitbucket.org/pypy/pypy/changeset/41b2950af11a/ Log: still searching for this weird problem. seems to be introduced when switching from a guard to a bridge in a very rare case diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -535,11 +535,11 @@ return chr(ord(c) + ord(c1)) functions = [ - #(func_int, lltype.Signed, types.sint, 655360, 655360), - #(func_int, lltype.Signed, types.sint, 655360, -293999429), + (func_int, lltype.Signed, types.sint, 655360, 655360), + (func_int, lltype.Signed, types.sint, 655360, -293999429), (func_int, rffi.SHORT, types.sint16, 1213, 1213), - #(func_int, rffi.SHORT, types.sint16, 1213, -12020), - #(func_char, lltype.Char, types.uchar, 12, 12), + (func_int, rffi.SHORT, types.sint16, 1213, -12020), + (func_char, lltype.Char, types.uchar, 12, 12), ] cpu = self.cpu diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -58,6 +58,7 @@ gpr_regs = 0 fpr_regs = 0 stack_params = [] + print("### prepare_arguemtns:") for i in range(num_args): loc = arglocs[i] if not arglocs[i].is_float(): @@ -65,8 +66,10 @@ non_float_locs.append(arglocs[i]) non_float_regs.append(self.GPR_ARGS[gpr_regs]) gpr_regs += 1 + print(" %d: %s at [%s];" % (i, arglocs[i], self.GPR_ARGS[gpr_regs-1])) else: stack_params.append(i) + print(" %d: %s at stack[%d];" % (i,arglocs[i], len(stack_params)-1)) else: if fpr_regs < max_fpr_in_reg: float_locs.append(arglocs[i]) @@ -74,8 +77,8 @@ else: stack_params.append(i) - self.subtracted_to_sp += len(stack_params) * 8 - base = -len(stack_params) * 8 + self.subtracted_to_sp += len(stack_params) * WORD + base = -len(stack_params) * WORD if self.is_call_release_gil: self.subtracted_to_sp += 8*WORD base -= 8*WORD @@ -139,8 +142,8 @@ self.mc.raw_call() def restore_stack_pointer(self): - if self.subtracted_to_sp != 0: - self.mc.LAY(r.SP, l.addr(self.subtracted_to_sp, r.SP)) + # it must at LEAST be 160 bytes + self.mc.LAY(r.SP, l.addr(self.subtracted_to_sp, r.SP)) def load_result(self): assert (self.resloc is None or @@ -226,30 +229,28 @@ reg = self.resloc PARAM_SAVE_AREA_OFFSET = 0 if reg is not None: + # save 1 word below the stack pointer if reg.is_core_reg(): - self.mc.STG(reg, l.addr(-7*WORD, r.SP)) + self.mc.STG(reg, l.addr(-1*WORD, r.SP)) elif reg.is_fp_reg(): - self.mc.STD(reg, l.addr(-7*WORD, r.SP)) + self.mc.STD(reg, l.addr(-1*WORD, r.SP)) + self.mc.push_std_frame(8*WORD) self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr) self.mc.raw_call() + self.mc.pop_std_frame(8*WORD) if reg is not None: if reg.is_core_reg(): - self.mc.LG(reg, l.addr(-7*WORD, r.SP)) + self.mc.LG(reg, l.addr(-1*WORD, r.SP)) elif reg.is_fp_reg(): - self.mc.LD(reg, l.addr(-7*WORD, r.SP)) + self.mc.LD(reg, l.addr(-1*WORD, r.SP)) # replace b1_location with BEQ(here) pmc = OverwritingBuilder(self.mc, b1_location, 1) pmc.BRCL(c.EQ, l.imm(self.mc.currpos() - b1_location)) pmc.overwrite() - # restore the values that is void after LMG - if gcrootmap: - if gcrootmap.is_shadow_stack and self.is_call_release_gil: - self.mc.LGR(r.SCRATCH, RSHADOWOLD) self.mc.LMG(r.r8, r.r13, l.addr(-7*WORD, r.SP)) - def write_real_errno(self, save_err): if save_err & rffi.RFFI_READSAVED_ERRNO: # Just before a call, read '*_errno' and write it into the diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -217,12 +217,12 @@ def restore_link(self): self.LG(r.RETURN, l.addr(14*WORD, r.SP)) - def push_std_frame(self): - self.STG(r.SP, l.addr(-STD_FRAME_SIZE_IN_BYTES, r.SP)) - self.LAY(r.SP, l.addr(-STD_FRAME_SIZE_IN_BYTES, r.SP)) + def push_std_frame(self, additional_bytes=0): + self.STG(r.SP, l.addr(-(STD_FRAME_SIZE_IN_BYTES + additional_bytes), r.SP)) + self.LAY(r.SP, l.addr(-(STD_FRAME_SIZE_IN_BYTES + additional_bytes), r.SP)) - def pop_std_frame(self): - self.LAY(r.SP, l.addr(STD_FRAME_SIZE_IN_BYTES, r.SP)) + def pop_std_frame(self, additional_bytes=0): + self.LAY(r.SP, l.addr(STD_FRAME_SIZE_IN_BYTES + additional_bytes, r.SP)) def get_assembler_function(self): "NOT_RPYTHON: tests only" diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -204,8 +204,9 @@ self.a.mc.LARL(r.r5, loc.imm(-8)) self.a.mc.LG(r.r4, loc.addr(8,r.r5)) self.a.mc.AG(r.r4, loc.addr(0,r.r5)) + self.a.mc.LGR(r.r2, r.r4) self.a.jmpto(r.r14) - assert run_asm(self.a) == 0 + assert run_asm(self.a) == -2 def test_xor(self): self.a.mc.XGR(r.r2, r.r2) diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -28,3 +28,36 @@ # realloc frame takes the most space (from just after larl, to lay) bridge_loop_instructions = "larl; lg; cgfi; je; lghi; stg; " \ "lay; lgfi;( iihf;)? lgfi;( iihf;)? basr; lay; lg; br;$" + + def test_multiple_arguments(self): + from rpython.rtyper.annlowlevel import llhelper + from rpython.jit.metainterp.typesystem import deref + from rpython.rlib.jit_libffi import types + from rpython.jit.codewriter.effectinfo import EffectInfo + from rpython.rlib.rarithmetic import intmask + + def func_int(a, b, c, d, e, f): + sum = intmask(a) + intmask(b) + intmask(c) + intmask(d) + intmask(e) + intmask(f) + return sum + + functions = [ + (func_int, lltype.Signed, types.sint, 655360, 655360), + (func_int, lltype.Signed, types.sint, 655360, -293999429), + ] + + cpu = self.cpu + for func, TP, ffi_type, num, num1 in functions: + # + FPTR = self.Ptr(self.FuncType([TP] * 6, TP)) + func_ptr = llhelper(FPTR, func) + FUNC = deref(FPTR) + funcbox = self.get_funcbox(cpu, func_ptr) + # first, try it with the "normal" calldescr + calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + iargs = [0x7fffFFFFffffFFFF,1,0,0,0,0] + args = [InputArgInt(num) for num in iargs] + res = self.execute_operation(rop.CALL_I, + [funcbox] + args, + 'int', descr=calldescr) + assert res == sum(iargs) From pypy.commits at gmail.com Thu Jan 14 06:27:03 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 14 Jan 2016 03:27:03 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged default Message-ID: <56978607.e906c20a.75447.1525@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81758:7cc617cb941e Date: 2016-01-14 10:29 +0100 http://bitbucket.org/pypy/pypy/changeset/7cc617cb941e/ Log: merged default diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -39,5 +39,5 @@ # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html -cffi_imports: +cffi_imports: pypy-c PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -372,7 +372,7 @@ def arg_int_w(self, w_obj, minimum, errormsg): space = self.space try: - result = space.int_w(w_obj) + result = space.int_w(space.int(w_obj)) # CPython allows floats as parameters except OperationError, e: if e.async(space): raise diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -225,6 +225,12 @@ assert it.next() == x raises(StopIteration, it.next) + # CPython implementation allows floats + it = itertools.islice([1, 2, 3, 4, 5], 0.0, 3.0, 2.0) + for x in [1, 3]: + assert it.next() == x + raises(StopIteration, it.next) + it = itertools.islice([1, 2, 3], 0, None) for x in [1, 2, 3]: assert it.next() == x diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -330,15 +330,11 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - if funcobj.random_effects_on_gcobjs: - return True - except (AttributeError, lltype.DelayedPointer): - return True # better safe than sorry + def analyze_external_call(self, funcobj, seen=None): + if funcobj.random_effects_on_gcobjs: + return True return super(RandomEffectsAnalyzer, self).analyze_external_call( - op, seen) + funcobj, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -21,7 +21,10 @@ pass -class CachedField(object): +class AbstractCachedEntry(object): + """ abstract base class abstracting over the difference between caching + struct fields and array items. """ + def __init__(self): # Cache information for a field descr, or for an (array descr, index) # pair. It can be in one of two states: @@ -29,8 +32,8 @@ # 1. 'cached_infos' is a list listing all the infos that are # caching this descr # - # 2. we just did one setfield, which is delayed (and thus - # not synchronized). 'lazy_setfield' is the delayed + # 2. we just did one set(field/arrayitem), which is delayed (and thus + # not synchronized). '_lazy_set' is the delayed # ResOperation. In this state, 'cached_infos' contains # out-of-date information. More precisely, the field # value pending in the ResOperation is *not* visible in @@ -38,43 +41,39 @@ # self.cached_infos = [] self.cached_structs = [] - self._lazy_setfield = None - self._lazy_setfield_registered = False + self._lazy_set = None - def register_dirty_field(self, structop, info): + def register_info(self, structop, info): + # invariant: every struct or array ptr info, that is not virtual and + # that has a non-None entry at + # info._fields[descr.get_index()] + # must be in cache_infos self.cached_structs.append(structop) self.cached_infos.append(info) - def invalidate(self, descr): - for opinfo in self.cached_infos: - assert isinstance(opinfo, info.AbstractStructPtrInfo) - opinfo._fields[descr.get_index()] = None - self.cached_infos = [] - self.cached_structs = [] - def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): - assert self._lazy_setfield is None + assert self._lazy_set is None for i, info in enumerate(self.cached_infos): structbox = optimizer.get_box_replacement(self.cached_structs[i]) info.produce_short_preamble_ops(structbox, descr, index, optimizer, shortboxes) def possible_aliasing(self, optheap, opinfo): - # If lazy_setfield is set and contains a setfield on a different + # If lazy_set is set and contains a setfield on a different # structvalue, then we are annoyed, because it may point to either # the same or a different structure at runtime. # XXX constants? - return (self._lazy_setfield is not None + return (self._lazy_set is not None and (not optheap.getptrinfo( - self._lazy_setfield.getarg(0)).same_info(opinfo))) + self._lazy_set.getarg(0)).same_info(opinfo))) def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. structinfo = optheap.ensure_ptr_info_arg0(op) - arg1 = optheap.get_box_replacement(self._getvalue(op)) + arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): - self.force_lazy_setfield(optheap, op.getdescr()) + self.force_lazy_set(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) cached_field = self._getfield(structinfo, op.getdescr(), optheap, False) if cached_field is not None: @@ -87,58 +86,43 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield, and register - # myself in the optheap's _lazy_setfields_and_arrayitems list - self._lazy_setfield = op - #if not self._lazy_setfield_registered: - # self._lazy_setfield_registered = True + # common case: store the 'op' as lazy_set + self._lazy_set = op else: # this is the case where the pending setfield ends up # storing precisely the value that is already there, # as proved by 'cached_fields'. In this case, we don't - # need any _lazy_setfield: the heap value is already right. - # Note that this may reset to None a non-None lazy_setfield, + # need any _lazy_set: the heap value is already right. + # Note that this may reset to None a non-None lazy_set, # cancelling its previous effects with no side effect. # Now, we have to force the item in the short preamble self._getfield(structinfo, op.getdescr(), optheap) - self._lazy_setfield = None + self._lazy_set = None def getfield_from_cache(self, optheap, opinfo, descr): # Returns the up-to-date field's value, or None if not cached. if self.possible_aliasing(optheap, opinfo): - self.force_lazy_setfield(optheap, descr) - if self._lazy_setfield is not None: - op = self._lazy_setfield - return optheap.get_box_replacement(self._getvalue(op)) + self.force_lazy_set(optheap, descr) + if self._lazy_set is not None: + op = self._lazy_set + return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) if res is not None: return res.get_box_replacement() return None - def _getvalue(self, op): - return op.getarg(1) - - def _getfield(self, opinfo, descr, optheap, true_force=True): - res = opinfo.getfield(descr, optheap) - if isinstance(res, PreambleOp): - if not true_force: - return res.op - res = optheap.optimizer.force_op_from_preamble(res) - opinfo.setfield(descr, None, res, optheap) - return res - - def force_lazy_setfield(self, optheap, descr, can_cache=True): - op = self._lazy_setfield + def force_lazy_set(self, optheap, descr, can_cache=True): + op = self._lazy_set if op is not None: - # This is the way _lazy_setfield is usually reset to None. + # This is the way _lazy_set is usually reset to None. # Now we clear _cached_fields, because actually doing the # setfield might impact any of the stored result (because of # possible aliasing). self.invalidate(descr) - self._lazy_setfield = None + self._lazy_set = None if optheap.postponed_op: for a in op.getarglist(): if a is optheap.postponed_op: @@ -151,25 +135,74 @@ # back in the cache: the value of this particular structure's # field. opinfo = optheap.ensure_ptr_info_arg0(op) - self._setfield(op, opinfo, optheap) + self.put_field_back_to_info(op, opinfo, optheap) elif not can_cache: self.invalidate(descr) - def _setfield(self, op, opinfo, optheap): + + # abstract methods + + def _get_rhs_from_set_op(self, op): + """ given a set(field or arrayitem) op, return the rhs argument """ + raise NotImplementedError("abstract method") + + def put_field_back_to_info(self, op, opinfo, optheap): + """ this method is called just after a lazy setfield was ommitted. it + puts the information of the lazy setfield back into the proper cache in + the info. """ + raise NotImplementedError("abstract method") + + def _getfield(self, opinfo, descr, optheap, true_force=True): + raise NotImplementedError("abstract method") + + def invalidate(self, descr): + """ clear all the cached knowledge in the infos in self.cached_infos. + """ + raise NotImplementedError("abstract method") + + +class CachedField(AbstractCachedEntry): + def _get_rhs_from_set_op(self, op): + return op.getarg(1) + + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(1)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setfield(op.getdescr(), struct, arg, optheap, self) + opinfo.setfield(op.getdescr(), struct, arg, optheap=optheap, cf=self) -class ArrayCachedField(CachedField): + def _getfield(self, opinfo, descr, optheap, true_force=True): + res = opinfo.getfield(descr, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.AbstractStructPtrInfo): + assert opinfo in self.cached_infos + if isinstance(res, PreambleOp): + if not true_force: + return res.op + res = optheap.optimizer.force_op_from_preamble(res) + opinfo.setfield(descr, None, res, optheap=optheap) + return res + + def invalidate(self, descr): + for opinfo in self.cached_infos: + assert isinstance(opinfo, info.AbstractStructPtrInfo) + opinfo._fields[descr.get_index()] = None + self.cached_infos = [] + self.cached_structs = [] + + +class ArrayCachedItem(AbstractCachedEntry): def __init__(self, index): self.index = index - CachedField.__init__(self) + AbstractCachedEntry.__init__(self) - def _getvalue(self, op): + def _get_rhs_from_set_op(self, op): return op.getarg(2) def _getfield(self, opinfo, descr, optheap, true_force=True): res = opinfo.getitem(descr, self.index, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.ArrayPtrInfo): + assert opinfo in self.cached_infos if (isinstance(res, PreambleOp) and optheap.optimizer.cpu.supports_guard_gc_type): if not true_force: @@ -179,10 +212,10 @@ opinfo.setitem(descr, index, None, res, optheap=optheap) return res - def _setfield(self, op, opinfo, optheap): + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(2)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setitem(op.getdescr(), self.index, struct, arg, self, optheap) + opinfo.setitem(op.getdescr(), self.index, struct, arg, optheap=optheap, cf=self) def invalidate(self, descr): for opinfo in self.cached_infos: @@ -201,15 +234,11 @@ self.postponed_op = None - # XXXX the rest is old - # cached array items: {array descr: {index: CachedField}} - #self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} # cache of corresponding {array descrs: dict 'entries' field descr} self.corresponding_array_descrs = {} # - self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False @@ -221,7 +250,7 @@ def flush(self): self.cached_dict_reads.clear() self.corresponding_array_descrs.clear() - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.emit_postponed_op() def emit_postponed_op(self): @@ -234,7 +263,7 @@ descrkeys = self.cached_fields.keys() if not we_are_translated(): # XXX Pure operation of boxes that are cached in several places will - # only be removed from the peeled loop when red from the first + # only be removed from the peeled loop when read from the first # place discovered here. This is far from ideal, as it makes # the effectiveness of our optimization a bit random. It should # howevere always generate correct results. For tests we dont @@ -249,14 +278,7 @@ d.produce_potential_short_preamble_ops(self.optimizer, sb, descr, index) - def register_dirty_field(self, descr, op, info): - self.field_cache(descr).register_dirty_field(op, info) - - def register_dirty_array_field(self, arraydescr, op, index, info): - self.arrayitem_cache(arraydescr, index).register_dirty_field(op, info) - def clean_caches(self): - del self._lazy_setfields_and_arrayitems[:] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) @@ -285,7 +307,7 @@ try: cf = submap[index] except KeyError: - cf = submap[index] = ArrayCachedField(index) + cf = submap[index] = ArrayCachedItem(index) return cf def emit_operation(self, op): @@ -304,7 +326,7 @@ return if op.is_guard(): self.optimizer.pendingfields = ( - self.force_lazy_setfields_and_arrayitems_for_guard()) + self.force_lazy_sets_for_guard()) return opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or # handled specially @@ -332,7 +354,7 @@ if not effectinfo.has_random_effects(): self.force_from_effectinfo(effectinfo) return - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.clean_caches() def optimize_CALL_I(self, op): @@ -410,7 +432,7 @@ # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) + self.force_lazy_set(fielddescr) for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: @@ -420,7 +442,7 @@ del self.cached_dict_reads[fielddescr] except KeyError: pass - self.force_lazy_setfield(fielddescr, can_cache=False) + self.force_lazy_set(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) if arraydescr in self.corresponding_array_descrs: @@ -431,16 +453,16 @@ pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) + self.force_lazy_set(vrefinfo.descr_forced) # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. - def force_lazy_setfield(self, descr, can_cache=True): + def force_lazy_set(self, descr, can_cache=True): try: cf = self.cached_fields[descr] except KeyError: return - cf.force_lazy_setfield(self, descr, can_cache) + cf.force_lazy_set(self, descr, can_cache) def force_lazy_setarrayitem(self, arraydescr, indexb=None, can_cache=True): try: @@ -449,35 +471,35 @@ return for idx, cf in submap.iteritems(): if indexb is None or indexb.contains(idx): - cf.force_lazy_setfield(self, None, can_cache) + cf.force_lazy_set(self, None, can_cache) - def force_all_lazy_setfields_and_arrayitems(self): + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for submap in self.cached_arrayitems.itervalues(): for index, cf in submap.iteritems(): - cf.force_lazy_setfield(self, None) + cf.force_lazy_set(self, None) - def force_lazy_setfields_and_arrayitems_for_guard(self): + def force_lazy_sets_for_guard(self): pendingfields = [] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue val = op.getarg(1) if self.optimizer.is_virtual(val): pendingfields.append(op) continue - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for descr, submap in self.cached_arrayitems.iteritems(): for index, cf in submap.iteritems(): - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue # the only really interesting case that we need to handle in the @@ -489,7 +511,7 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): @@ -503,7 +525,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, self, cf) + structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I @@ -554,12 +576,12 @@ # default case: produce the operation self.make_nonnull(op.getarg(0)) self.emit_operation(op) - # the remember the result of reading the array item + # then remember the result of reading the array item if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), - self.get_box_replacement(op), cf, - self) + self.get_box_replacement(op), optheap=self, + cf=cf) optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -196,28 +196,28 @@ def all_items(self): return self._fields - def setfield(self, descr, struct, op, optheap=None, cf=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) assert isinstance(op, AbstractValue) - self._fields[descr.get_index()] = op + self._fields[fielddescr.get_index()] = op if cf is not None: assert not self.is_virtual() assert struct is not None - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) - def getfield(self, descr, optheap=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) - return self._fields[descr.get_index()] + def getfield(self, fielddescr, optheap=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) + return self._fields[fielddescr.get_index()] def _force_elements(self, op, optforce, descr): if self._fields is None: return - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETFIELD_GC, [op, subbox], - descr=flddescr) + descr=fielddescr) self._fields[i] = None optforce.emit_operation(setfieldop) @@ -249,16 +249,16 @@ if fieldinfo and fieldinfo.is_virtual(): fieldinfo.visitor_walk_recursive(op, visitor, optimizer) - def produce_short_preamble_ops(self, structbox, descr, index, optimizer, + def produce_short_preamble_ops(self, structbox, fielddescr, index, optimizer, shortboxes): if self._fields is None: return - if descr.get_index() >= len(self._fields): + if fielddescr.get_index() >= len(self._fields): # we don't know about this item return - op = optimizer.get_box_replacement(self._fields[descr.get_index()]) - opnum = OpHelpers.getfield_for_descr(descr) - getfield_op = ResOperation(opnum, [structbox], descr=descr) + op = optimizer.get_box_replacement(self._fields[fielddescr.get_index()]) + opnum = OpHelpers.getfield_for_descr(fielddescr) + getfield_op = ResOperation(opnum, [structbox], descr=fielddescr) shortboxes.add_heap_op(op, getfield_op) def _is_immutable_and_filled_with_constants(self, optimizer, memo=None): @@ -294,12 +294,12 @@ return True def _force_elements_immutable(self, descr, constptr, optforce): - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] subbox = optforce.force_box(fld) assert isinstance(subbox, Const) execute(optforce.optimizer.cpu, None, rop.SETFIELD_GC, - flddescr, constptr, subbox) + fielddescr, constptr, subbox) class InstancePtrInfo(AbstractStructPtrInfo): _attrs_ = ('_known_class',) @@ -505,6 +505,7 @@ info._items = self._items[:] def _force_elements(self, op, optforce, descr): + # XXX descr = op.getdescr() const = optforce.new_const_item(self.descr) for i in range(self.length): @@ -523,15 +524,16 @@ optforce.emit_operation(setop) optforce.pure_from_args(rop.ARRAYLEN_GC, [op], ConstInt(len(self._items))) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): + assert not self.is_virtual() self._items = self._items + [None] * (index - len(self._items) + 1) self._items[index] = op if cf is not None: assert not self.is_virtual() - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) def getitem(self, descr, index, optheap=None): if self._items is None or index >= len(self._items): @@ -626,13 +628,13 @@ i = 0 fielddescrs = op.getdescr().get_all_fielddescrs() for index in range(self.length): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: fld = self._items[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETINTERIORFIELD_GC, [op, ConstInt(index), subbox], - descr=flddescr) + descr=fielddescr) optforce.emit_operation(setfieldop) # heapcache does not work for interiorfields # if it does, we would need a fix here @@ -645,7 +647,7 @@ fielddescrs = self.descr.get_all_fielddescrs() i = 0 for index in range(self.getlength()): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: itemop = self._items[i] if (itemop is not None and not isinstance(itemop, Const)): @@ -691,21 +693,21 @@ optheap.const_infos[ref] = info return info - def getfield(self, descr, optheap=None): - info = self._get_info(descr.get_parent_descr(), optheap) - return info.getfield(descr) + def getfield(self, fielddescr, optheap=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + return info.getfield(fielddescr) def getitem(self, descr, index, optheap=None): info = self._get_array_info(descr, optheap) return info.getitem(descr, index) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): info = self._get_array_info(descr, optheap) - info.setitem(descr, index, struct, op, cf) + info.setitem(descr, index, struct, op, optheap=optheap, cf=cf) - def setfield(self, descr, struct, op, optheap=None, cf=None): - info = self._get_info(descr.get_parent_descr(), optheap) - info.setfield(descr, struct, op, optheap, cf) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + info.setfield(fielddescr, struct, op, optheap=optheap, cf=cf) def is_null(self): return not bool(self._const.getref_base()) diff --git a/rpython/jit/metainterp/optimizeopt/shortpreamble.py b/rpython/jit/metainterp/optimizeopt/shortpreamble.py --- a/rpython/jit/metainterp/optimizeopt/shortpreamble.py +++ b/rpython/jit/metainterp/optimizeopt/shortpreamble.py @@ -81,7 +81,7 @@ assert index >= 0 cf = optheap.arrayitem_cache(descr, index) opinfo.setitem(self.getfield_op.getdescr(), index, self.res, - pop, cf, optheap=optheap) + pop, optheap, cf) def repr(self, memo): return "HeapOp(%s, %s)" % (self.res.repr(memo), diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -696,58 +696,6 @@ # ---------- - def test_virtual_1(self): - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - setfield_gc(p0, i1, descr=valuedescr) - jump(i, p0) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_float(self): - ops = """ - [f, p0] - f0 = getfield_gc(p0, descr=floatdescr) - f1 = float_add(f0, f) - setfield_gc(p0, f1, descr=floatdescr) - jump(f, p0) - """ - expected = """ - [f, f2] - f1 = float_add(f2, f) - jump(f, f1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected) - - def test_virtual_2(self): - py.test.skip("XXX") - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - p1 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p1, i1, descr=valuedescr) - jump(i, p1) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_oois(self): ops = """ [p0, p1, p2] @@ -774,20 +722,6 @@ guard_false(i12) [] jump(p0, p1, p2) """ - expected = """ - [p2] - # all constant-folded :-) - jump(p2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable), - Virtual(node_vtable), - Not''', - expected) - # - # to be complete, we also check the no-opt case where most comparisons - # are not removed. The exact set of comparisons removed depends on - # the details of the algorithm... expected2 = """ [p0, p1, p2] guard_nonnull(p0) [] @@ -801,26 +735,6 @@ """ self.optimize_loop(ops, expected2) - def test_virtual_default_field(self): - py.test.skip("XXX") - ops = """ - [p0] - i0 = getfield_gc(p0, descr=valuedescr) - guard_value(i0, 0) [] - p1 = new_with_vtable(ConstClass(node_vtable)) - # the field 'value' has its default value of 0 - jump(p1) - """ - expected = """ - [i] - guard_value(i, 0) [] - jump(0) - """ - # the 'expected' is sub-optimal, but it should be done by another later - # optimization step. See test_find_nodes_default_field() for why. - self.optimize_loop(ops, 'Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_3(self): ops = """ [i] @@ -837,55 +751,6 @@ """ self.optimize_loop(ops, expected) - def test_virtual_4(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - jump(i3, p1) - """ - expected = """ - [i0, i1] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_5(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p2 = new_with_vtable(descr=nodesize2) - setfield_gc(p2, i1, descr=valuedescr) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - setfield_gc(p1, p2, descr=nextdescr) - jump(i3, p1) - """ - expected = """ - [i0, i1, i1bis] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2, i1) - """ - self.optimize_loop(ops, - '''Not, Virtual(node_vtable, - valuedescr=Not, - nextdescr=Virtual(node_vtable2, - valuedescr=Not))''', - expected) - def test_virtual_constant_isnull(self): ops = """ [i0] @@ -1209,27 +1074,6 @@ """ self.optimize_loop(ops, expected) - def test_varray_2(self): - ops = """ - [i0, p1] - i1 = getarrayitem_gc(p1, 0, descr=arraydescr) - i2 = getarrayitem_gc(p1, 1, descr=arraydescr) - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - p2 = new_array(2, descr=arraydescr) - setarrayitem_gc(p2, 1, i0, descr=arraydescr) - setarrayitem_gc(p2, 0, 20, descr=arraydescr) - jump(i0, p2) - """ - expected = """ - [i0, i1, i2] - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - jump(i0, 20, i0) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr, Not, Not)', expected) - def test_p123_array(self): ops = """ [i1, p2, p3] @@ -1264,23 +1108,6 @@ """ self.optimize_loop(ops, expected) - def test_vstruct_1(self): - py.test.skip("XXX") - ops = """ - [i1, p2] - i2 = getfield_gc(p2, descr=adescr) - escape_n(i2) - p3 = new(descr=ssize) - setfield_gc(p3, i1, descr=adescr) - jump(i1, p3) - """ - expected = """ - [i1, i2] - escape_n(i2) - jump(i1, i1) - """ - self.optimize_loop(ops, 'Not, VStruct(ssize, adescr=Not)', expected) - def test_p123_vstruct(self): ops = """ [i1, p2, p3] @@ -1443,26 +1270,6 @@ """ self.optimize_loop(ops, expected) - def test_duplicate_getfield_guard_value_const(self): - ops = """ - [p1] - guard_value(p1, ConstPtr(myptr)) [] - i1 = getfield_gc_i(p1, descr=valuedescr) - i2 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i2) - jump(p1) - """ - expected = """ - [] - i1 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i1) - jump() - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr)', expected) - def test_duplicate_getfield_sideeffects_1(self): ops = """ [p1] @@ -1688,12 +1495,12 @@ jump(p1, i1, i2) """ expected = """ - [i1, i2] + [p1, i1, i2] + guard_value(p1, ConstPtr(myptr)) [] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) - jump(i1, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr), Not, Not', expected) + jump(ConstPtr(myptr), i1, i2) + """ + self.optimize_loop(ops, expected) def test_duplicate_getarrayitem_1(self): ops = """ @@ -1870,163 +1677,7 @@ """ self.optimize_loop(ops, expected) - def test_bug_1(self): - ops = """ - [i0, p1] - p4 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_with_vtable(descr=nodesize) - p3 = escape_r() - setfield_gc(p2, p3, descr=nextdescr) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, nextdescr=Not)', - expected) - - def test_bug_2(self): - ops = """ - [i0, p1] - p4 = getarrayitem_gc(p1, 0, descr=arraydescr2) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_array(1, descr=arraydescr2) - p3 = escape_r() - setarrayitem_gc(p2, 0, p3, descr=arraydescr2) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr2, Not)', - expected) - - def test_bug_3(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - p2a = new_with_vtable(descr=nodesize) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_bug_3bis(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_invalid_loop_1(self): - ops = """ - [p1] - guard_isnull(p1) [] - # - p2 = new_with_vtable(descr=nodesize) - jump(p2) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, - ops, 'Virtual(node_vtable)', None) - - def test_invalid_loop_2(self): - py.test.skip("this would fail if we had Fixed again in the specnodes") - ops = """ - [p1] - guard_class(p1, ConstClass(node_vtable2)) [] - # - p2 = new_with_vtable(descr=nodesize) - escape_n(p2) # prevent it from staying Virtual - jump(p2) - """ - py.test.raises(InvalidLoop, self.optimize_loop, - ops, '...', None) - - def test_invalid_loop_3(self): - ops = """ - [p1] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_isnull(p2) [] - # - p3 = new_with_vtable(descr=nodesize) - p4 = new_with_vtable(descr=nodesize) - setfield_gc(p3, p4, descr=nextdescr) - jump(p3) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, ops, - 'Virtual(node_vtable, nextdescr=Virtual(node_vtable))', - None) - def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -2060,7 +1711,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2078,7 +1728,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2625,26 +2274,6 @@ where p2 is a node_vtable, valuedescr=i2, nextdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_6(self): - ops = """ - [p0, i0, i1] - guard_true(i0) [p0] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i1, i1) - """ - expected = """ - [i1b, i0, i1] - guard_true(i0) [i1b] - jump(i1, i1, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable, valuedescr=Not), - Not, Not''', expected) - self.check_expanded_fail_descr('''p0 - where p0 is a node_vtable, valuedescr=i1b - ''', rop.GUARD_TRUE) - def test_expand_fail_varray(self): ops = """ [i1] @@ -2686,47 +2315,6 @@ where p2 is a vstruct ssize, adescr=i1, bdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_v_all_1(self): - ops = """ - [i1, p1a, i2] - p6s = getarrayitem_gc(p1a, 0, descr=arraydescr2) - p7v = getfield_gc_r(p6s, descr=bdescr) - p5s = new(descr=ssize) - setfield_gc(p5s, i2, descr=adescr) - setfield_gc(p5s, p7v, descr=bdescr) - setarrayitem_gc(p1a, 1, p5s, descr=arraydescr2) - guard_true(i1) [p1a] - p2s = new(descr=ssize) - p3v = new_with_vtable(descr=nodesize) - p4a = new_array(2, descr=arraydescr2) - setfield_gc(p2s, i1, descr=adescr) - setfield_gc(p2s, p3v, descr=bdescr) - setfield_gc(p3v, i2, descr=valuedescr) - setarrayitem_gc(p4a, 0, p2s, descr=arraydescr2) - jump(i1, p4a, i2) - """ - expected = """ - [i1, ia, iv, pnull, i2] - guard_true(i1) [ia, iv, i2] - jump(1, 1, i2, NULL, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, ''' - Not, - VArray(arraydescr2, - VStruct(ssize, - adescr=Not, - bdescr=Virtual(node_vtable, - valuedescr=Not)), - Not), - Not''', expected) - self.check_expanded_fail_descr('''p1a - where p1a is a varray arraydescr2: p6s, p5s - where p6s is a vstruct ssize, adescr=ia, bdescr=p7v - where p5s is a vstruct ssize, adescr=i2, bdescr=p7v - where p7v is a node_vtable, valuedescr=iv - ''', rop.GUARD_TRUE) - def test_expand_fail_lazy_setfield_1(self): ops = """ [p1, i2, i3] @@ -5179,6 +4767,8 @@ """ self.optimize_loop(ops, expected) + def test_intmod_bounds_harder(self): + py.test.skip("harder") # Of course any 'maybe-negative % power-of-two' can be turned into # int_and(), but that's a bit harder to detect here because it turns # into several operations, and of course it is wrong to just turn @@ -5196,7 +4786,6 @@ i4 = int_and(i0, 15) finish(i4) """ - py.test.skip("harder") self.optimize_loop(ops, expected) def test_intmod_bounds_bug1(self): @@ -5357,7 +4946,7 @@ i5 = int_lt(i2, i1) guard_true(i5) [] - i6 = getarrayitem_gc(p0, i2) + i6 = getarrayitem_gc_i(p0, i2, descr=chararraydescr) finish(i6) """ expected = """ @@ -5368,7 +4957,7 @@ i4 = int_lt(i2, i0) guard_true(i4) [] - i6 = getarrayitem_gc(p0, i3) + i6 = getarrayitem_gc_i(p0, i3, descr=chararraydescr) finish(i6) """ self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -2969,7 +2969,6 @@ assert "promote of a virtual" in exc.msg def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -3015,7 +3014,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -3039,7 +3037,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -35,15 +35,11 @@ return True return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except lltype.DelayedPointer: + def analyze_external_call(self, funcobj, seen=None): + if funcobj.random_effects_on_gcobjs: return True - if getattr(funcobj, 'random_effects_on_gcobjs', False): - return True - return graphanalyze.BoolGraphAnalyzer.analyze_external_call(self, op, - seen) + return graphanalyze.BoolGraphAnalyzer.analyze_external_call( + self, funcobj, seen) def analyze_simple_operation(self, op, graphinfo): if op.opname in ('malloc', 'malloc_varsize'): flags = op.args[1].value diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py --- a/rpython/rlib/rstacklet.py +++ b/rpython/rlib/rstacklet.py @@ -1,7 +1,7 @@ import sys from rpython.rlib import _rffi_stacklet as _c from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import fetch_translated_config from rpython.rtyper.lltypesystem import lltype, llmemory DEBUG = False @@ -10,8 +10,8 @@ class StackletThread(object): @jit.dont_look_inside - def __init__(self, config): - self._gcrootfinder = _getgcrootfinder(config, we_are_translated()) + def __init__(self, _argument_ignored_for_backward_compatibility=None): + self._gcrootfinder = _getgcrootfinder(fetch_translated_config()) self._thrd = _c.newthread() if not self._thrd: raise MemoryError @@ -67,11 +67,8 @@ # ____________________________________________________________ -def _getgcrootfinder(config, translated): - if translated: - assert config is not None, ("you have to pass a valid config, " - "e.g. from 'driver.config'") - elif '__pypy__' in sys.builtin_module_names: +def _getgcrootfinder(config): + if config is None and '__pypy__' in sys.builtin_module_names: import py py.test.skip("cannot run the stacklet tests on top of pypy: " "calling directly the C function stacklet_switch() " diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py --- a/rpython/rlib/test/test_rstacklet.py +++ b/rpython/rlib/test/test_rstacklet.py @@ -17,10 +17,9 @@ class Runner: STATUSMAX = 5000 - config = None def init(self, seed): - self.sthread = rstacklet.StackletThread(self.config) + self.sthread = rstacklet.StackletThread() self.random = rrandom.Random(seed) def done(self): @@ -301,12 +300,11 @@ config.translation.gcrootfinder = cls.gcrootfinder GCROOTFINDER = cls.gcrootfinder cls.config = config - cls.old_values = Runner.config, Runner.STATUSMAX - Runner.config = config + cls.old_status_max = Runner.STATUSMAX Runner.STATUSMAX = 25000 def teardown_class(cls): - Runner.config, Runner.STATUSMAX = cls.old_values + Runner.STATUSMAX = cls.old_status_max def test_demo1(self): t, cbuilder = self.compile(entry_point) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -22,7 +22,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, - Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, + Ptr, ContainerType, FuncType, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive, getfunctionptr) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError from rpython.rtyper import rclass @@ -876,18 +876,6 @@ return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) - def genexternalcall(self, fnname, args_v, resulttype=None, **flags): - if isinstance(resulttype, Repr): - resulttype = resulttype.lowleveltype - argtypes = [v.concretetype for v in args_v] - FUNCTYPE = FuncType(argtypes, resulttype or Void) - f = functionptr(FUNCTYPE, fnname, **flags) - cf = inputconst(typeOf(f), f) - return self.genop('direct_call', [cf]+list(args_v), resulttype) - - def gencapicall(self, cfnname, args_v, resulttype=None, **flags): - return self.genexternalcall(cfnname, args_v, resulttype=resulttype, external="CPython", **flags) - def genconst(self, ll_value): return inputconst(typeOf(ll_value), ll_value) diff --git a/rpython/translator/backendopt/canraise.py b/rpython/translator/backendopt/canraise.py --- a/rpython/translator/backendopt/canraise.py +++ b/rpython/translator/backendopt/canraise.py @@ -22,8 +22,7 @@ log.WARNING("Unknown operation: %s" % op.opname) return True - def analyze_external_call(self, op, seen=None): - fnobj = op.args[0].value._obj + def analyze_external_call(self, fnobj, seen=None): return getattr(fnobj, 'canraise', True) analyze_exceptblock = None # don't call this diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py --- a/rpython/translator/backendopt/gilanalysis.py +++ b/rpython/translator/backendopt/gilanalysis.py @@ -21,12 +21,8 @@ self, graph, seen) def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj - if getattr(funcobj, 'transactionsafe', False): - return False - else: - return False - + return False + def analyze_simple_operation(self, op, graphinfo): return False diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,5 +1,4 @@ from rpython.rtyper.lltypesystem.lltype import DelayedPointer -from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -55,11 +54,7 @@ def analyze_startblock(self, block, seen=None): return self.bottom_result() - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except DelayedPointer: - return self.bottom_result() + def analyze_external_call(self, funcobj, seen=None): result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper @@ -80,12 +75,24 @@ def analyze(self, op, seen=None, graphinfo=None): if op.opname == "direct_call": - graph = get_graph(op.args[0], self.translator) - if graph is None: - x = self.analyze_external_call(op, seen) + try: + funcobj = op.args[0].value._obj + except DelayedPointer: + return self.top_result() + if funcobj is None: + # We encountered a null pointer. Calling it will crash. + # However, the call could be on a dead path, so we return the + # bottom result here. + return self.bottom_result() + if getattr(funcobj, 'external', None) is not None: + x = self.analyze_external_call(funcobj, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x + try: + graph = funcobj.graph + except AttributeError: + return self.top_result() x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) diff --git a/rpython/translator/backendopt/test/test_canraise.py b/rpython/translator/backendopt/test/test_canraise.py --- a/rpython/translator/backendopt/test/test_canraise.py +++ b/rpython/translator/backendopt/test/test_canraise.py @@ -204,8 +204,7 @@ result = ra.can_raise(fgraph.startblock.operations[0]) assert not result - z = lltype.functionptr(lltype.FuncType([lltype.Signed], lltype.Signed), - 'foobar') + z = llexternal('z', [lltype.Signed], lltype.Signed) def g(x): return z(x) t, ra = self.translate(g, [int]) diff --git a/rpython/translator/backendopt/test/test_graphanalyze.py b/rpython/translator/backendopt/test/test_graphanalyze.py --- a/rpython/translator/backendopt/test/test_graphanalyze.py +++ b/rpython/translator/backendopt/test/test_graphanalyze.py @@ -1,7 +1,7 @@ import random from rpython.tool.algo.unionfind import UnionFind -from rpython.translator.backendopt.graphanalyze import Dependency -from rpython.translator.backendopt.graphanalyze import DependencyTracker +from rpython.translator.backendopt.graphanalyze import (Dependency, + DependencyTracker, BoolGraphAnalyzer) class FakeGraphAnalyzer: @@ -49,3 +49,30 @@ method1 = rectrack(n, tracker) method2 = expected(n) assert method1 == method2 + + +def test_delayed_fnptr(): + from rpython.flowspace.model import SpaceOperation + from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator + from rpython.translator.translator import TranslationContext + t = TranslationContext() + t.buildannotator() + t.buildrtyper() + annhelper = MixLevelHelperAnnotator(t.rtyper) + def f(): + pass + c_f = annhelper.constfunc(f, [], None) + op = SpaceOperation('direct_call', [c_f], None) + analyzer = BoolGraphAnalyzer(t) + assert analyzer.analyze(op) + + +def test_null_fnptr(): + from rpython.flowspace.model import SpaceOperation, Constant + from rpython.rtyper.lltypesystem.lltype import Void, FuncType, nullptr + from rpython.translator.translator import TranslationContext + t = TranslationContext() + fnptr = nullptr(FuncType([], Void)) + op = SpaceOperation('direct_call', [Constant(fnptr)], None) + analyzer = BoolGraphAnalyzer(t) + assert not analyzer.analyze(op) diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -24,22 +24,13 @@ if not isinstance(f, lltype._ptr): return None try: - funcobj = f._getobj() + funcobj = f._obj except lltype.DelayedPointer: return None try: - callable = funcobj._callable - except (AttributeError, KeyError, AssertionError): - return None - try: return funcobj.graph except AttributeError: return None - try: - callable = funcobj._callable - return translator._graphof(callable) - except (AttributeError, KeyError, AssertionError): - return None def replace_exitswitch_by_constant(block, const): From pypy.commits at gmail.com Thu Jan 14 06:27:05 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 14 Jan 2016 03:27:05 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: rewritten the last step of force_allocate_register, it could have taken (odd, even) instead of (even, odd) Message-ID: <56978609.624fc20a.9db1e.0ffd@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81759:97bc3df81867 Date: 2016-01-14 12:26 +0100 http://bitbucket.org/pypy/pypy/changeset/97bc3df81867/ Log: rewritten the last step of force_allocate_register, it could have taken (odd, even) instead of (even, odd) adding 1 word more space to the stack to remap_frame_layout (if it needs stack space to break the cycle) diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -786,11 +786,14 @@ arguments.append(box.getfloatstorage()) else: assert 0, box.type + #import pdb; pdb.set_trace() deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) fail = cpu.get_latest_descr(deadframe) + print("exited at %s" % (fail, )) do_assert(fail is self.should_fail_by.getdescr(), "Got %r, expected %r" % (fail, self.should_fail_by.getdescr())) + values = [] for i, v in enumerate(self.get_fail_args()): if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case @@ -805,6 +808,8 @@ self.expected[v], i) ) + values.append(value) + #import pdb; pdb.set_trace() exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and self.guard_op.is_guard_exception()): @@ -839,6 +844,7 @@ _fail_box.set_forwarded(None) # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) + subloop.inputargs = op.getfailargs()[:] self.subloops.append(subloop) # keep around for debugging if guard_op.is_guard_exception(): subloop.operations.append(exc_handling(guard_op)) diff --git a/rpython/jit/backend/test/zll_stress.py b/rpython/jit/backend/test/zll_stress.py --- a/rpython/jit/backend/test/zll_stress.py +++ b/rpython/jit/backend/test/zll_stress.py @@ -19,4 +19,5 @@ r = Random() r.jumpahead(piece*99999999) for i in range(piece*per_piece, (piece+1)*per_piece): + print("got", i, r.getstate()) check_random_function(cpu, LLtypeOperationBuilder, r, i, total_iterations) diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -82,6 +82,11 @@ if self.is_call_release_gil: self.subtracted_to_sp += 8*WORD base -= 8*WORD + # one additional owrd for remap frame layout + # regalloc_push will overwrite -8(r.SP) and destroy + # a parameter if we would not reserve that space + base -= WORD + self.subtracted_to_sp += WORD for idx,i in enumerate(stack_params): loc = arglocs[i] offset = base + 8 * idx @@ -100,6 +105,7 @@ self.asm.regalloc_mov(loc, src) self.mc.STG(src, l.addr(offset, r.SP)) + # We must also copy fnloc into FNREG non_float_locs.append(self.fnloc) non_float_regs.append(r.RETURN) @@ -113,6 +119,7 @@ remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.SCRATCH) + def push_gcmap(self): # we push *now* the gcmap, describing the status of GC registers # after the rearrangements done just before, ignoring the return diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -208,14 +208,16 @@ del self.free_regs[i] i = self.free_regs.index(odd) del self.free_regs[i] + assert even.is_even() and odd.is_odd() return even, odd else: # an odd free register, maybe the even one is # a candidate? odd = even - even = REGS[even.value-1] + even = REGS[odd.value-1] if even in r.MANAGED_REGS and even not in self.free_regs: # yes even might be a candidate + # this means that odd is free, but not even candidates.append(even) i -= 1 @@ -244,44 +246,55 @@ if candidate is not None: # well, we got away with a single spill :) reg = self.reg_bindings[candidate] - self.force_spill_var(candidate) + self._sync_var(candidate) + del self.reg_bindings[candidate] if reg.is_even(): + assert var is not candidate self.reg_bindings[var] = reg rmfree = REGS[reg.value+1] - rmidx = self.free_regs.index(reg) - del self.free_regs[rmidx] self.reg_bindings[var2] = rmfree - rmidx = self.free_regs.index(rmfree) - del self.free_regs[rmidx] + self.free_regs = [fr for fr in self.free_regs if fr is not rmfree] return reg, rmfree else: + assert var2 is not candidate self.reg_bindings[var2] = reg - rmidx = self.free_regs.index(reg) - del self.free_regs[rmidx] rmfree = REGS[reg.value-1] self.reg_bindings[var] = rmfree - rmidx = self.free_regs.index(rmfree) - del self.free_regs[rmidx] + self.free_regs = [fr for fr in self.free_regs if fr is not rmfree] return rmfree, reg # there is no candidate pair that only would # require one spill, thus we need to spill two! + # this is a rare case! + reverse_mapping = { reg : var for var, reg in self.reg_bindings.items() } # always take the first for i, reg in enumerate(r.MANAGED_REGS): + if i % 2 == 1: + continue if i+1 < len(r.MANAGED_REGS): reg2 = r.MANAGED_REGS[i+1] - try: - even = self._spill_var(var, forbidden_vars, reg) - odd = self._spill_var(var2, forbidden_vars, reg2) - except NoVariableToSpill: - # woops, this is in efficient + assert reg.is_even() and reg2.is_odd() + ovar = reverse_mapping[reg] + ovar2 = reverse_mapping[reg2] + if ovar in forbidden_vars or ovar2 in forbidden_vars: + # blocked, try other register pair continue + even = reg + odd = reg2 + self._sync_var(ovar) + self._sync_var(ovar2) + del self.reg_bindings[ovar] + del self.reg_bindings[ovar2] + # both are not added to free_regs! no need to do so self.reg_bindings[var] = even self.reg_bindings[var2] = odd break else: # no break! this is bad. really bad raise NoVariableToSpill() + + reverse_mapping = None + return even, odd def force_result_in_even_reg(self, result_v, loc, forbidden_vars=[]): From pypy.commits at gmail.com Thu Jan 14 07:07:44 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 14 Jan 2016 04:07:44 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: add myself Message-ID: <56978f90.c74fc20a.ae108.20fb@mx.google.com> Author: fijal Branch: extradoc Changeset: r5594:d92491ed5a41 Date: 2016-01-14 12:06 +0000 http://bitbucket.org/pypy/extradoc/changeset/d92491ed5a41/ Log: add myself diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -16,6 +16,7 @@ Matti Picus 20-25 Ermina Manuel Jacob 20-28 Ermina Richard Plangger 20-28 Ermina +Maciej Fijalkowski 20-? Ermina (big room preferred) ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room @@ -30,7 +31,6 @@ ==================== ============== ===================== Name Arrive/Depart Accomodation ==================== ============== ===================== -Maciej Fijalkowski ? ? Remi Meier ? ? Sebastian Pawlus ? ? Manuel Jacob ? ? From pypy.commits at gmail.com Thu Jan 14 07:07:46 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 14 Jan 2016 04:07:46 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: fix merge Message-ID: <56978f92.c4b1c20a.5de3b.1e9d@mx.google.com> Author: fijal Branch: extradoc Changeset: r5595:4293181cb93f Date: 2016-01-14 12:07 +0000 http://bitbucket.org/pypy/extradoc/changeset/4293181cb93f/ Log: fix merge diff --git a/blog/draft/cffi-embedding.rst b/blog/draft/cffi-embedding.rst --- a/blog/draft/cffi-embedding.rst +++ b/blog/draft/cffi-embedding.rst @@ -6,6 +6,10 @@ Python programs, in a way that is both simple and that works across CPython 2.x and 3.x and PyPy. +This post assumes that you know what CFFI is and how to use it in +API mode (``ffi.cdef()``, ``ffi.set_source()``, ``ffi.compile()``). +A quick overview can be found here__. + The major news of CFFI 1.4, released last december, was that you can now declare C functions with ``extern "Python"``, in the ``cdef()``. These magic keywords make the function callable from C (where it is @@ -51,7 +55,7 @@ supports them, just by exporting the API expected for such plugins. This is still being finalized, but please try it out. (You can also -see `embedding.py`_ directly online for a quick glance.) Here are +see `embedding.py`_ directly online for a quick glance.) See below the instructions on Linux with CPython 2.7 (CPython 3.x and non-Linux platforms are still a work in progress right now, but this should be quickly fixed): @@ -86,30 +90,31 @@ Very similar steps can be followed on PyPy, but it requires the ``cffi-static-callback-embedding`` branch of PyPy, which you must -first translate from sources. The difference is only that you need to +first translate from sources. The difference is then that you need to adapt the first ``gcc`` command line: replace ``-lpython2.7`` with ``-lpypy-c`` and to fix the ``-I`` path (and possibly add a ``-L`` path). -Note that CPython/PyPy is automatically initialized (using locks in -case of multi-threading) the first time any of the ``extern "Python"`` +Note that CPython/PyPy is automatically initialized (using locks in case +of multi-threading) the first time any of the ``extern "Python"`` functions is called from the C program. (This should work even if two different threads call the first time a function from two *different* embedded CFFI extensions; in other words, explicit initialization is never needed). The custom initialization-time Python code you put in -``ffi.embedding_init_code()`` is executed. If this code starts to be -big, you can move it to independent modules or packages. Then the -initialization-time Python code only needs to import them. In that -case, you have to carefully set up ``sys.path`` if the modules are not -installed in the usual Python way. +``ffi.embedding_init_code()`` is executed at that time. If this code +starts to be big, you can move it to independent modules or packages. +Then the initialization-time Python code only needs to import them. In +that case, you have to carefully set up ``sys.path`` if the modules are +not installed in the usual Python way. -A better alternative would be to use virtualenv. How to do that is -not fully fleshed out so far. You can certainly run the whole program -with the environment variables set up by the virtualenv's ``activate`` -script first. There are probably other solutions that involve using -gcc's ``-Wl,-rpath=\$ORIGIN/`` or ``-Wl,-rpath=/fixed/path/`` options -to load a specific libpython or libypypy-c library. If you try it out -and it doesn't work the way you would like, please complain :-) +If the Python code is big and full of dependencies, a better alternative +would be to use virtualenv. How to do that is not fully fleshed out so +far. You can certainly run the whole program with the environment +variables set up by the virtualenv's ``activate`` script first. There +are probably other solutions that involve using gcc's +``-Wl,-rpath=\$ORIGIN/`` or ``-Wl,-rpath=/fixed/path/`` options to load +a specific libpython or libypypy-c library. If you try it out and it +doesn't work the way you would like, please complain ``:-)`` Another point: right now this does not support CPython's notion of multiple subinterpreters. The logic creates a single global Python diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -17,6 +17,7 @@ Manuel Jacob 20-28 Ermina Richard Plangger 20-28 Ermina Maciej Fijalkowski 20-? Ermina (big room preferred) +Ronan Lamy 20-27 Ermina? ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room From pypy.commits at gmail.com Thu Jan 14 08:38:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 14 Jan 2016 05:38:44 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: realloc frame needs new size in the right register, another zll_stress test passing Message-ID: <5697a4e4.c4b61c0a.55951.6d1f@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81760:1b048c0f98d6 Date: 2016-01-14 14:30 +0100 http://bitbucket.org/pypy/pypy/changeset/1b048c0f98d6/ Log: realloc frame needs new size in the right register, another zll_stress test passing diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -786,7 +786,6 @@ arguments.append(box.getfloatstorage()) else: assert 0, box.type - #import pdb; pdb.set_trace() deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) fail = cpu.get_latest_descr(deadframe) print("exited at %s" % (fail, )) @@ -809,7 +808,6 @@ i) ) values.append(value) - #import pdb; pdb.set_trace() exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and self.guard_op.is_guard_exception()): diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -587,7 +587,7 @@ mc.write('\x00'*14) self.mc.push_std_frame() mc.load_imm(r.RETURN, self._frame_realloc_slowpath) - self.load_gcmap(mc, r.SCRATCH, gcmap) + self.load_gcmap(mc, r.r1, gcmap) mc.raw_call() self.mc.pop_std_frame() @@ -601,7 +601,7 @@ # three traps, so exactly three instructions to patch here pmc.CGFI(r.SCRATCH2, l.imm(frame_depth)) pmc.BRC(c.EQ, l.imm(jmp_target - (traps_pos + 6))) - pmc.LGHI(r.SCRATCH, l.imm(frame_depth)) + pmc.LGHI(r.r0, l.imm(frame_depth)) pmc.overwrite() @rgc.no_release_gil From pypy.commits at gmail.com Thu Jan 14 08:38:46 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 14 Jan 2016 05:38:46 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed print statements Message-ID: <5697a4e6.4e8e1c0a.10986.6149@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81761:e8159a73d818 Date: 2016-01-14 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/e8159a73d818/ Log: removed print statements diff --git a/rpython/jit/backend/test/zll_stress.py b/rpython/jit/backend/test/zll_stress.py --- a/rpython/jit/backend/test/zll_stress.py +++ b/rpython/jit/backend/test/zll_stress.py @@ -19,5 +19,4 @@ r = Random() r.jumpahead(piece*99999999) for i in range(piece*per_piece, (piece+1)*per_piece): - print("got", i, r.getstate()) check_random_function(cpu, LLtypeOperationBuilder, r, i, total_iterations) diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -58,7 +58,6 @@ gpr_regs = 0 fpr_regs = 0 stack_params = [] - print("### prepare_arguemtns:") for i in range(num_args): loc = arglocs[i] if not arglocs[i].is_float(): @@ -66,10 +65,8 @@ non_float_locs.append(arglocs[i]) non_float_regs.append(self.GPR_ARGS[gpr_regs]) gpr_regs += 1 - print(" %d: %s at [%s];" % (i, arglocs[i], self.GPR_ARGS[gpr_regs-1])) else: stack_params.append(i) - print(" %d: %s at stack[%d];" % (i,arglocs[i], len(stack_params)-1)) else: if fpr_regs < max_fpr_in_reg: float_locs.append(arglocs[i]) From pypy.commits at gmail.com Thu Jan 14 10:44:57 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 14 Jan 2016 07:44:57 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: translation issue in guard_subclass, fixed autoencoding tests (had too generic argtype annotations) Message-ID: <5697c279.8673c20a.08fa.6fdb@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81762:b3a326be8820 Date: 2016-01-14 16:44 +0100 http://bitbucket.org/pypy/pypy/changeset/b3a326be8820/ Log: translation issue in guard_subclass, fixed autoencoding tests (had too generic argtype annotations) diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -1,4 +1,4 @@ -from rpython.jit.metainterp.history import ConstInt, FLOAT, Const +from rpython.jit.metainterp.history import AbstractValue, ConstInt, FLOAT, Const from rpython.jit.backend.zarch.locations import imm, addr from rpython.jit.backend.llsupport.regalloc import TempVar import rpython.jit.backend.zarch.registers as r diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -269,8 +269,8 @@ encode_base_displace(self, len_base_disp) return encode_ssf -def build_rs(mnemonic, (opcode,)): - @builder.arguments('r,r,bd') +def build_rs(mnemonic, (opcode,), argtypes='r,r,bd'): + @builder.arguments(argtypes) def encode_rs(self, reg1, reg3, base_displace): self.writechar(opcode) self.writechar(chr((reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4)) diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -25,7 +25,7 @@ 'MSGR': ('rre', ['\xB9','\x0C']), 'MSG': ('rxy', ['\xE3','\x0C']), 'MSGFI': ('ril', ['\xC2','\x00']), - 'MLGR': ('rre', ['\xB9','\x86']), + 'MLGR': ('rre', ['\xB9','\x86'], 'eo,r'), # div/mod 'DSGR': ('rre', ['\xB9','\x0D'], 'eo,r'), 'DSG': ('rxy', ['\xE3','\x0D'], 'eo,bidl'), @@ -114,8 +114,8 @@ 'XI': ('si', ['\x97']), 'XIY': ('siy', ['\xEB','\x57']), - 'XILF': ('ril', ['\xC0','\x06']), - 'XIHF': ('ril', ['\xC0','\x07']), + 'XILF': ('ril', ['\xC0','\x07'], 'r/m,u32'), + 'XIHF': ('ril', ['\xC0','\x06'], 'r/m,u32'), # OR immediate 'OIHH': ('ri_u', ['\xA5', '\x08']), @@ -130,7 +130,7 @@ 'LAY': ('rxy', ['\xE3','\x71']), # move - 'MVCLE': ('rs', ['\xA8']), + 'MVCLE': ('rs', ['\xA8'], 'eo,eo,bd'), # load memory diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -755,7 +755,7 @@ # read this field to get the vtable pointer self.mc.LG(r.SCRATCH2, l.addr(offset, loc_object)) # read the vtable's subclassrange_min field - assert check_imm(offset2) + assert check_imm_value(offset2) self.mc.load(r.SCRATCH2, r.SCRATCH2, offset2) else: # read the typeid From pypy.commits at gmail.com Thu Jan 14 11:28:18 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Jan 2016 08:28:18 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: tried hard and failed to have the embedding tests run in a debug-mode python Message-ID: <5697cca2.8205c20a.ca9cc.7dbd@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2582:ac536db9c3c1 Date: 2016-01-14 17:25 +0100 http://bitbucket.org/cffi/cffi/changeset/ac536db9c3c1/ Log: tried hard and failed to have the embedding tests run in a debug- mode python diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -4,6 +4,10 @@ from testing.udir import udir import cffi +if hasattr(sys, 'gettotalrefcount'): + py.test.skip("tried hard and failed to have these tests run " + "in a debug-mode python") + local_dir = os.path.dirname(os.path.abspath(__file__)) _link_error = '?' From pypy.commits at gmail.com Thu Jan 14 11:44:48 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 14 Jan 2016 08:44:48 -0800 (PST) Subject: [pypy-commit] pypy globals-quasiimmut: close to-be-merged branch Message-ID: <5697d080.c4b1c20a.5de3b.ffff883f@mx.google.com> Author: Carl Friedrich Bolz Branch: globals-quasiimmut Changeset: r81763:9b15516b463a Date: 2016-01-14 17:44 +0100 http://bitbucket.org/pypy/pypy/changeset/9b15516b463a/ Log: close to-be-merged branch From pypy.commits at gmail.com Thu Jan 14 11:50:15 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 14 Jan 2016 08:50:15 -0800 (PST) Subject: [pypy-commit] pypy default: a whatsnew entry Message-ID: <5697d1c7.4c0c1c0a.25017.ffffaf3b@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81765:7b97ce35aeb2 Date: 2016-01-14 17:49 +0100 http://bitbucket.org/pypy/pypy/changeset/7b97ce35aeb2/ Log: a whatsnew entry diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -110,3 +110,7 @@ short-running Python callbacks. (CFFI on CPython has a hack to achieve the same result.) This can also be seen as a bug fix: previously, thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. From pypy.commits at gmail.com Thu Jan 14 11:50:13 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 14 Jan 2016 08:50:13 -0800 (PST) Subject: [pypy-commit] pypy default: merge globals-quasiimmut: Message-ID: <5697d1c5.e44bc20a.eb52f.31fc@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81764:3c41c2563421 Date: 2016-01-14 17:48 +0100 http://bitbucket.org/pypy/pypy/changeset/3c41c2563421/ Log: merge globals-quasiimmut: - make most global lookups take zero operations, even in the preamble and in bridges - reduce the size of most frames by one word (they don't have to store the globals dict any more in most cases) diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -9,8 +9,8 @@ class Code(W_Root): """A code is a compiled version of some source code. Abstract base class.""" - _immutable_ = True hidden_applevel = False + _immutable_fields_ = ['co_name', 'fast_natural_arity', 'hidden_applevel'] # n >= 0 : arity # FLATPYCALL = 0x100 diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -56,11 +56,13 @@ class PyCode(eval.Code): "CPython-style code objects." - _immutable_ = True - _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", - "_args_as_cellvars[*]"] - + _immutable_fields_ = ["_signature", "co_argcount", "co_cellvars[*]", + "co_code", "co_consts_w[*]", "co_filename", + "co_firstlineno", "co_flags", "co_freevars[*]", + "co_lnotab", "co_names_w[*]", "co_nlocals", + "co_stacksize", "co_varnames[*]", + "_args_as_cellvars[*]", "w_globals?"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -84,6 +86,10 @@ self.co_name = name self.co_firstlineno = firstlineno self.co_lnotab = lnotab + # store the first globals object that the code object is run in in + # here. if a frame is run in that globals object, it does not need to + # store it at all + self.w_globals = None self.hidden_applevel = hidden_applevel self.magic = magic self._signature = cpython_code_signature(self) @@ -91,6 +97,14 @@ self._init_ready() self.new_code_hook() + def frame_stores_global(self, w_globals): + if self.w_globals is None: + self.w_globals = w_globals + return False + if self.w_globals is w_globals: + return False + return True + def new_code_hook(self): code_hook = self.space.fromcache(CodeHookCache)._code_hook if code_hook is not None: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -36,6 +36,7 @@ def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno + self.w_globals = pycode.w_globals class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -67,7 +68,6 @@ escaped = False # see mark_as_escaped() debugdata = None - w_globals = None pycode = None # code object executed by that frame locals_cells_stack_w = None # the list of all locals, cells and the valuestack valuestackdepth = 0 # number of items on valuestack @@ -90,8 +90,9 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.space = space - self.w_globals = w_globals self.pycode = code + if code.frame_stores_global(w_globals): + self.getorcreatedebug().w_globals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize @@ -116,6 +117,12 @@ self.debugdata = FrameDebugData(self.pycode) return self.debugdata + def get_w_globals(self): + debugdata = self.getdebug() + if debugdata is not None: + return debugdata.w_globals + return jit.promote(self.pycode).w_globals + def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -201,8 +208,9 @@ if flags & pycode.CO_NEWLOCALS: self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: - assert self.w_globals is not None - self.getorcreatedebug().w_locals = self.w_globals + w_globals = self.get_w_globals() + assert w_globals is not None + self.getorcreatedebug().w_locals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -449,7 +457,7 @@ w_blockstack, w_exc_value, # last_exception w_tb, # - self.w_globals, + self.get_w_globals(), w(self.last_instr), w(self.frame_finished_execution), w(f_lineno), @@ -658,6 +666,11 @@ def fget_getdictscope(self, space): return self.getdictscope() + def fget_w_globals(self, space): + # bit silly, but GetSetProperty passes a space + return self.get_w_globals() + + ### line numbers ### def fget_f_lineno(self, space): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,7 +837,7 @@ w_bases = self.popvalue() w_name = self.popvalue() w_metaclass = find_metaclass(self.space, w_bases, - w_methodsdict, self.w_globals, + w_methodsdict, self.get_w_globals(), self.space.wrap(self.get_builtin())) w_newclass = self.space.call_function(w_metaclass, w_name, w_bases, w_methodsdict) @@ -881,14 +881,14 @@ def STORE_GLOBAL(self, nameindex, next_instr): varname = self.getname_u(nameindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_globals, varname, w_newvalue) + self.space.setitem_str(self.get_w_globals(), varname, w_newvalue) def DELETE_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - self.space.delitem(self.w_globals, w_varname) + self.space.delitem(self.get_w_globals(), w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.getorcreatedebug().w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.get_w_globals(): varname = self.getname_u(nameindex) w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -898,7 +898,7 @@ self.LOAD_GLOBAL(nameindex, next_instr) # fall-back def _load_global(self, varname): - w_value = self.space.finditem_str(self.w_globals, varname) + w_value = self.space.finditem_str(self.get_w_globals(), varname) if w_value is None: # not in the globals, now look in the built-ins w_value = self.get_builtin().getdictvalue(self.space, varname) @@ -1029,7 +1029,7 @@ if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) - w_globals = self.w_globals + w_globals = self.get_w_globals() if w_flag is None: w_obj = space.call_function(w_import, w_modulename, w_globals, w_locals, w_fromlist) @@ -1237,7 +1237,7 @@ w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments) self.pushvalue(self.space.wrap(fn)) @@ -1249,7 +1249,7 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -34,6 +34,7 @@ import sys f = sys._getframe() assert f.f_globals is globals() + raises(TypeError, "f.f_globals = globals()") def test_f_builtins(self): import sys, __builtin__ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -772,7 +772,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), + f_globals = GetSetProperty(PyFrame.fget_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -93,7 +93,7 @@ if space.is_none(w_locals): w_locals = w_globals else: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): diff --git a/pypy/module/__builtin__/interp_inspect.py b/pypy/module/__builtin__/interp_inspect.py --- a/pypy/module/__builtin__/interp_inspect.py +++ b/pypy/module/__builtin__/interp_inspect.py @@ -2,7 +2,7 @@ def globals(space): "Return the dictionary containing the current scope's global variables." ec = space.getexecutioncontext() - return ec.gettopframe_nohidden().w_globals + return ec.gettopframe_nohidden().get_w_globals() def locals(space): """Return a dictionary containing the current scope's local variables. diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -93,7 +93,7 @@ Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = w_obj.strategy.__class__.__name__ + name = w_obj.get_strategy().__class__.__name__ elif isinstance(w_obj, W_ListObject): name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): diff --git a/pypy/module/_warnings/interp_warnings.py b/pypy/module/_warnings/interp_warnings.py --- a/pypy/module/_warnings/interp_warnings.py +++ b/pypy/module/_warnings/interp_warnings.py @@ -75,7 +75,7 @@ frame = ec.getnextframe_nohidden(frame) stacklevel -= 1 if frame: - w_globals = frame.w_globals + w_globals = frame.get_w_globals() lineno = frame.get_last_lineno() else: w_globals = space.sys.w_dict diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -30,7 +30,7 @@ currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtins = space.getitem(w_globals, space.wrap('__builtins__')) if not space.isinstance_w(w_builtins, space.w_dict): w_builtins = w_builtins.getdict(space) @@ -54,7 +54,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.w_globals) + return borrow_from(None, caller.get_w_globals()) @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -34,7 +34,7 @@ frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) - py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_globals = make_ref(space, frame.get_w_globals()) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @cpython_api([PyObject], lltype.Void, external=False) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -20,7 +20,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -526,7 +526,7 @@ log = self.run(f) loop, = log.loops_by_filename(self.filepath) call_ops = log.opnames(loop.ops_by_id('call')) - assert call_ops == ['force_token'] # it does not follow inlining + assert call_ops == ['guard_not_invalidated', 'force_token'] # it does not follow inlining # add_ops = log.opnames(loop.ops_by_id('add')) assert add_ops == ['int_add'] @@ -534,9 +534,10 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'int_lt', 'guard_true', + 'guard_not_invalidated', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,8 +72,6 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] @@ -200,6 +198,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ + guard_not_invalidated? i14 = force_token() i16 = force_token() """) @@ -222,7 +221,7 @@ loop, = log.loops_by_id('call') ops = log.opnames(loop.ops_by_id('call')) guards = [ops for ops in ops if ops.startswith('guard')] - assert guards == ["guard_no_overflow"] + assert guards == ["guard_not_invalidated", "guard_no_overflow"] def test_kwargs(self): # this is not a very precise test, could be improved @@ -281,6 +280,7 @@ assert log.result == 13000 loop0, = log.loops_by_id('g1') assert loop0.match_by_id('g1', """ + guard_not_invalidated? i20 = force_token() i22 = int_add_ovf(i8, 3) guard_no_overflow(descr=...) @@ -438,9 +438,6 @@ i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) - guard_value(p7, ConstPtr(ptr25), descr=...) - p26 = getfield_gc_r(p7, descr=) - guard_value(p26, ConstPtr(ptr27), descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) @@ -472,6 +469,7 @@ i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,9 +16,5 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p12 = getfield_gc_r(p10, descr=) - guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc_r(ConstPtr(p17), descr=) - guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -124,7 +124,7 @@ setfield_gc(ConstPtr(ptr39), i59, descr=...) i62 = int_lt(i61, 0) guard_false(i62, descr=...) - jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, p100, descr=...) + jump(..., descr=...) """) def test_mutate_class(self): @@ -183,7 +183,7 @@ setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(ptr42), descr=...) setfield_gc(ConstPtr(ptr69), p77, descr=...) - jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, p100, descr=...) + jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -145,9 +145,9 @@ i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) - i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) + i18 = force_token() i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) @@ -178,9 +178,9 @@ i16 = int_ge(i11, i12) guard_false(i16, descr=...) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) @@ -211,9 +211,9 @@ i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i95 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -23,12 +23,8 @@ i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) - p62 = getfield_gc_r(ConstPtr(ptr37), descr=) setfield_gc(p18, i61, descr=) - guard_value(p62, ConstPtr(ptr39), descr=...) guard_not_invalidated(descr=...) - p64 = getfield_gc_r(ConstPtr(ptr40), descr=) - guard_value(p64, ConstPtr(ptr42), descr=...) p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc_r(p14, descr=) diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -153,7 +153,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): d_new[_wrapkey(space, key)] = unwrap_cell(self.space, cell) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -42,6 +42,14 @@ class W_DictMultiObject(W_Root): + """ Abstract base class that does not store a strategy. """ + def get_strategy(self): + raise NotImplementedError("abstract method") + + def set_strategy(self, strategy): + raise NotImplementedError("abstract method") + + @staticmethod def allocate_and_init_instance(space, w_type=None, module=False, instance=False, strdict=False, @@ -52,6 +60,10 @@ # every module needs its own strategy, because the strategy stores # the version tag strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) + W_ModuleDictObject.__init__(w_obj, space, strategy, storage) + return w_obj elif space.config.objspace.std.withmapdict and instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) @@ -68,18 +80,17 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_obj = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_obj, space, strategy, storage) + w_obj = space.allocate_instance(W_DictObject, w_type) + W_DictObject.__init__(w_obj, space, strategy, storage) return w_obj - def __init__(self, space, strategy, storage): + def __init__(self, space, storage): self.space = space - self.strategy = strategy self.dstorage = storage def __repr__(self): """representation for debugging purposes""" - return "%s(%s)" % (self.__class__.__name__, self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.get_strategy()) def unwrap(w_dict, space): result = {} @@ -101,7 +112,7 @@ self.setitem(w_k, w_v) def setitem_str(self, key, w_value): - self.strategy.setitem_str(self, key, w_value) + self.get_strategy().setitem_str(self, key, w_value) @staticmethod def descr_new(space, w_dicttype, __args__): @@ -261,8 +272,9 @@ def nondescr_reversed_dict(self, space): """Not exposed directly to app-level, but via __pypy__.reversed_dict(). """ - if self.strategy.has_iterreversed: - it = self.strategy.iterreversed(self) + strategy = self.get_strategy() + if strategy.has_iterreversed: + it = strategy.iterreversed(self) return W_DictMultiIterKeysObject(space, it) else: # fall-back @@ -337,6 +349,37 @@ init_or_update(space, self, __args__, 'dict.update') +class W_DictObject(W_DictMultiObject): + """ a regular dict object """ + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.dstrategy = strategy + + def get_strategy(self): + return self.dstrategy + + def set_strategy(self, strategy): + self.dstrategy = strategy + + +class W_ModuleDictObject(W_DictMultiObject): + """ a dict object for a module, that is not expected to change. It stores + the strategy as a quasi-immutable field. """ + _immutable_fields_ = ['mstrategy?'] + + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.mstrategy = strategy + + def get_strategy(self): + return self.mstrategy + + def set_strategy(self, strategy): + self.mstrategy = strategy + + + + def _add_indirections(): dict_methods = "getitem getitem_str setitem setdefault \ popitem delitem clear \ @@ -347,7 +390,7 @@ def make_method(method): def f(self, *args): - return getattr(self.strategy, method)(self, *args) + return getattr(self.get_strategy(), method)(self, *args) f.func_name = method return f @@ -490,7 +533,7 @@ def clear(self, w_dict): strategy = self.space.fromcache(EmptyDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def listview_bytes(self, w_dict): @@ -556,32 +599,32 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(UnicodeDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_int_strategy(self, w_dict): strategy = self.space.fromcache(IntDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_identity_strategy(self, w_dict): from pypy.objspace.std.identitydict import IdentityDictStrategy strategy = self.space.fromcache(IdentityDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_object_strategy(self, w_dict): strategy = self.space.fromcache(ObjectDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def getitem(self, w_dict, w_key): @@ -662,7 +705,7 @@ if self.pos < self.len: result = getattr(self, 'next_' + TP + '_entry')() self.pos += 1 - if self.strategy is self.dictimplementation.strategy: + if self.strategy is self.dictimplementation.get_strategy(): return result # common case else: # waaa, obscure case: the strategy changed, but not the @@ -804,7 +847,7 @@ else: return # w_dict is completely empty, nothing to do count = w_dict.length() - 1 - w_updatedict.strategy.prepare_update(w_updatedict, count) + w_updatedict.get_strategy().prepare_update(w_updatedict, count) # If the strategy is still different, continue the slow way if not same_strategy(self, w_updatedict): for key, value, keyhash in iteritemsh: @@ -825,7 +868,7 @@ def same_strategy(self, w_otherdict): return (setitem_untyped is not None and - w_otherdict.strategy is self) + w_otherdict.get_strategy() is self) dictimpl.iterkeys = iterkeys dictimpl.itervalues = itervalues @@ -934,7 +977,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, value in d.iteritems(): d_new[self.wrap(key)] = value - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) # --------------- iterator interface ----------------- @@ -1178,7 +1221,7 @@ def update1_dict_dict(space, w_dict, w_data): - w_data.strategy.rev_update1_dict_dict(w_data, w_dict) + w_data.get_strategy().rev_update1_dict_dict(w_data, w_dict) def update1_pairs(space, w_dict, data_w): diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -18,7 +18,7 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage @@ -142,7 +142,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for i in range(len(keys)): d_new[self.wrap(keys[i])] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def switch_to_bytes_strategy(self, w_dict): @@ -152,7 +152,7 @@ d_new = strategy.unerase(storage) for i in range(len(keys)): d_new[keys[i]] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def view_as_kwargs(self, w_dict): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -6,7 +6,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.dictmultiobject import ( W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, - BaseValueIterator, BaseItemIterator, _never_equal_to_string + BaseValueIterator, BaseItemIterator, _never_equal_to_string, + W_DictObject, ) from pypy.objspace.std.typeobject import MutableCell @@ -407,7 +408,7 @@ strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) - w_dict = W_DictMultiObject(space, strategy, storage) + w_dict = W_DictObject(space, strategy, storage) flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag return w_dict @@ -422,8 +423,8 @@ # new dict. If the old dict was using the MapDictStrategy, we # have to force it now: otherwise it would remain an empty # shell that continues to delegate to 'self'. - if type(w_olddict.strategy) is MapDictStrategy: - w_olddict.strategy.switch_to_object_strategy(w_olddict) + if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag @@ -641,7 +642,7 @@ w_obj = self.unerase(w_dict.dstorage) strategy = self.space.fromcache(ObjectDictStrategy) dict_w = strategy.unerase(strategy.get_empty_storage()) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(dict_w) assert w_obj.getdict(self.space) is w_dict or w_obj._get_mapdict_map().terminator.w_cls is None materialize_r_dict(self.space, w_obj, dict_w) @@ -750,7 +751,7 @@ def next_key_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: @@ -772,7 +773,7 @@ def next_value_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: @@ -793,7 +794,7 @@ def next_item_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None, None if self.curr_map: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -18,7 +18,7 @@ from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject, wrapstr from pypy.objspace.std.complexobject import W_ComplexObject -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictMultiObject, W_DictObject from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject, setup_prebuilt, wrapint from pypy.objspace.std.iterobject import W_AbstractSeqIterObject, W_SeqIterObject @@ -439,7 +439,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_bytes() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_bytes() @@ -454,7 +454,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_unicode() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() @@ -467,7 +467,7 @@ def listview_int(self, w_obj): if type(w_obj) is W_ListObject: return w_obj.getitems_int() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_int() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() @@ -485,7 +485,7 @@ return None def view_as_kwargs(self, w_dict): - if type(w_dict) is W_DictMultiObject: + if type(w_dict) is W_DictObject: return w_dict.view_as_kwargs() return (None, None) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,7 +1,7 @@ import py from pypy.objspace.std.celldict import ModuleDictStrategy -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictObject, W_ModuleDictObject from pypy.objspace.std.test.test_dictmultiobject import ( BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, FakeString) @@ -14,7 +14,7 @@ def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version key = "a" @@ -23,30 +23,30 @@ v2 = strategy.version assert v1 is not v2 assert d.getitem(w_key) == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) == 1 d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 assert d.getitem(w_key) == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 2 d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 assert d.getitem(w_key) == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 3 d.delitem(w_key) v5 = strategy.version assert v5 is not v4 assert d.getitem(w_key) is None - assert d.strategy.getdictvalue_no_unwrapping(d, key) is None + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version x = object() @@ -134,7 +134,7 @@ py.test.skip("__repr__ doesn't work on appdirect") strategy = ModuleDictStrategy(cls.space) storage = strategy.get_empty_storage() - cls.w_d = W_DictMultiObject(cls.space, strategy, storage) + cls.w_d = W_ModuleDictObject(cls.space, strategy, storage) def test_popitem(self): import __pypy__ diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -2,14 +2,14 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - BytesDictStrategy, ObjectDictStrategy) + W_DictObject, BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): def test_empty(self): d = self.space.newdict() assert not self.space.is_true(d) - assert type(d.strategy) is not ObjectDictStrategy + assert type(d.get_strategy()) is not ObjectDictStrategy def test_nonempty(self): space = self.space @@ -1050,7 +1050,7 @@ return l def newlist_bytes(self, l): return l - DictObjectCls = W_DictMultiObject + DictObjectCls = W_DictObject def type(self, w_obj): if isinstance(w_obj, FakeString): return str @@ -1076,7 +1076,7 @@ return tuple(l) def newdict(self, module=False, instance=False): - return W_DictMultiObject.allocate_and_init_instance( + return W_DictObject.allocate_and_init_instance( self, module=module, instance=instance) def view_as_kwargs(self, w_d): @@ -1105,7 +1105,7 @@ w_float = float StringObjectCls = FakeString UnicodeObjectCls = FakeUnicode - w_dict = W_DictMultiObject + w_dict = W_DictObject iter = iter fixedview = list listview = list @@ -1149,8 +1149,8 @@ def get_impl(self): strategy = self.StrategyClass(self.fakespace) storage = strategy.get_empty_storage() - w_dict = self.fakespace.allocate_instance(W_DictMultiObject, None) - W_DictMultiObject.__init__(w_dict, self.fakespace, strategy, storage) + w_dict = self.fakespace.allocate_instance(W_DictObject, None) + W_DictObject.__init__(w_dict, self.fakespace, strategy, storage) return w_dict def fill_impl(self): @@ -1159,7 +1159,7 @@ def check_not_devolved(self): #XXX check if strategy changed!? - assert type(self.impl.strategy) is self.StrategyClass + assert type(self.impl.get_strategy()) is self.StrategyClass #assert self.impl.r_dict_content is None def test_popitem(self): @@ -1246,7 +1246,7 @@ for x in xrange(100): impl.setitem(self.fakespace.str_w(str(x)), x) impl.setitem(x, x) - assert type(impl.strategy) is ObjectDictStrategy + assert type(impl.get_strategy()) is ObjectDictStrategy def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names @@ -1308,7 +1308,7 @@ class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): BaseTestRDictImplementation.fill_impl(self) - self.impl.strategy.switch_to_object_strategy(self.impl) + self.impl.get_strategy().switch_to_object_strategy(self.impl) def check_not_devolved(self): pass @@ -1320,5 +1320,5 @@ def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is BytesDictStrategy + assert type(d.get_strategy()) is BytesDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -1,5 +1,5 @@ import py -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.kwargsdict import * space = FakeSpace() @@ -9,7 +9,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -23,7 +23,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -52,7 +52,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -69,11 +69,11 @@ def test_limit_size(): storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) for i in range(100): assert d.setitem_str("d%s" % i, 4) is None - assert d.strategy is not strategy - assert "BytesDictStrategy" == d.strategy.__class__.__name__ + assert d.get_strategy() is not strategy + assert "BytesDictStrategy" == d.get_strategy().__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() @@ -82,7 +82,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) w_l = d.w_keys() # does not crash def test_view_as_kwargs(): @@ -91,26 +91,27 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert (space.view_as_kwargs(d) == keys, values) strategy = EmptyDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert (space.view_as_kwargs(d) == [], []) def test_from_empty_to_kwargs(): strategy = EmptyKwargsDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) d.setitem_str("a", 3) - assert isinstance(d.strategy, KwargsDictStrategy) + assert isinstance(d.get_strategy(), KwargsDictStrategy) from pypy.objspace.std.test.test_dictmultiobject import BaseTestRDictImplementation, BaseTestDevolvedDictImplementation def get_impl(self): storage = strategy.erase(([], [])) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) + class TestKwargsDictImplementation(BaseTestRDictImplementation): StrategyClass = KwargsDictStrategy get_impl = get_impl diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.mapdict import * class Config: @@ -309,7 +309,7 @@ obj.setdictvalue(space, "c", 7) assert obj.storage == [50, 60, 70, 5, 6, 7] - class FakeDict(W_DictMultiObject): + class FakeDict(W_DictObject): def __init__(self, d): self.dstorage = d @@ -368,7 +368,7 @@ def devolve_dict(space, obj): w_d = obj.getdict(space) - w_d.strategy.switch_to_object_strategy(w_d) + w_d.get_strategy().switch_to_object_strategy(w_d) def test_get_setdictvalue_after_devolve(): cls = Class() @@ -1127,7 +1127,7 @@ def test_newdict_instance(): w_dict = space.newdict(instance=True) - assert type(w_dict.strategy) is MapDictStrategy + assert type(w_dict.get_strategy()) is MapDictStrategy class TestMapDictImplementationUsingnewdict(BaseTestRDictImplementation): StrategyClass = MapDictStrategy diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -478,12 +478,12 @@ def getdict(w_self, space): # returning a dict-proxy! from pypy.objspace.std.dictproxyobject import DictProxyStrategy - from pypy.objspace.std.dictmultiobject import W_DictMultiObject + from pypy.objspace.std.dictmultiobject import W_DictObject if w_self.lazyloaders: w_self._cleanup_() # force un-lazification strategy = space.fromcache(DictProxyStrategy) storage = strategy.erase(w_self) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) def is_heaptype(w_self): return w_self.flag_heaptype @@ -1139,7 +1139,7 @@ space = w_self.space caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_name = space.finditem(w_globals, space.wrap('__name__')) if w_name is not None: w_self.dict_w['__module__'] = w_name diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -63,7 +63,7 @@ for key, w_value in vars.items(): space.setitem(self.w_locals, space.wrap(key), w_value) if isinstance(code, str): - return space.eval(code, self.w_globals, self.w_locals) + return space.eval(code, self.get_w_globals(), self.w_locals) pyc = pycode.PyCode._from_code(space, code) return pyc.exec_host_bytecode(self.w_globals, self.w_locals) exec_ = eval @@ -248,7 +248,7 @@ #if filename.endswith("pyc"): # filename = filename[:-1] try: - space.exec_(str(source), frame.w_globals, w_locals, + space.exec_(str(source), frame.get_w_globals(), w_locals, filename=filename) except OperationError, e: if e.match(space, w_ExpectedException): From pypy.commits at gmail.com Thu Jan 14 13:43:32 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 14 Jan 2016 10:43:32 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: not every call needs to reload the pool register, but call_assembler must perform a reload Message-ID: <5697ec54.247bc20a.2f1f.ffffb688@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81766:30a2326ae1aa Date: 2016-01-14 19:42 +0100 http://bitbucket.org/pypy/pypy/changeset/30a2326ae1aa/ Log: not every call needs to reload the pool register, but call_assembler must perform a reload diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -1077,6 +1077,7 @@ self._store_force_index(self._find_nearby_operation(regalloc, +1)) # 'result_loc' is either r2, f0 or None self.call_assembler(op, argloc, vloc, result_loc, r.r2) + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) emit_call_assembler_i = _genop_call_assembler emit_call_assembler_r = _genop_call_assembler From pypy.commits at gmail.com Thu Jan 14 13:55:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 14 Jan 2016 10:55:07 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed debugging lines scattered along several places outside of the zarch backend dir Message-ID: <5697ef0b.88d31c0a.103a2.213c@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81767:6e2a46f9f524 Date: 2016-01-14 19:54 +0100 http://bitbucket.org/pypy/pypy/changeset/6e2a46f9f524/ Log: removed debugging lines scattered along several places outside of the zarch backend dir diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -84,7 +84,7 @@ # t = TranslationContext() t.config.translation.gc = gc - t.config.translation.lldebug = True # XXX debug + # t.config.translation.lldebug = True # pretty useful when debugging assembly if gc != 'boehm': t.config.translation.gcremovetypeptr = True for name, value in kwds.items(): diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -820,7 +820,7 @@ frame_depth = regalloc.get_final_frame_depth() jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - tgt_depth = jump_target_descr._zarch_clt.frame_info.jfi_frame_depth + tgt_depth = jump_target_descr._ppc_clt.frame_info.jfi_frame_depth target_frame_depth = tgt_depth - JITFRAME_FIXED_SIZE frame_depth = max(frame_depth, target_frame_depth) return frame_depth diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -788,11 +788,9 @@ assert 0, box.type deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) fail = cpu.get_latest_descr(deadframe) - print("exited at %s" % (fail, )) do_assert(fail is self.should_fail_by.getdescr(), "Got %r, expected %r" % (fail, self.should_fail_by.getdescr())) - values = [] for i, v in enumerate(self.get_fail_args()): if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case @@ -807,7 +805,6 @@ self.expected[v], i) ) - values.append(value) exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and self.guard_op.is_guard_exception()): @@ -842,7 +839,6 @@ _fail_box.set_forwarded(None) # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) - subloop.inputargs = op.getfailargs()[:] self.subloops.append(subloop) # keep around for debugging if guard_op.is_guard_exception(): subloop.operations.append(exc_handling(guard_op)) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -2117,8 +2117,8 @@ return l[-2] # not the blackholed version res = self.meta_interp(f, [5, 8]) assert 14 < res < 42 - #res = self.meta_interp(f, [5, 2]) - #assert 4 < res < 14 + res = self.meta_interp(f, [5, 2]) + assert 4 < res < 14 def test_compute_identity_hash(self): from rpython.rlib.objectmodel import compute_identity_hash diff --git a/rpython/jit/metainterp/test/test_executor.py b/rpython/jit/metainterp/test/test_executor.py --- a/rpython/jit/metainterp/test/test_executor.py +++ b/rpython/jit/metainterp/test/test_executor.py @@ -281,8 +281,8 @@ yield (rop.FLOAT_NEG, [15.9], 'float', -15.9) yield (rop.FLOAT_ABS, [-5.9], 'float', 5.9) yield (rop.FLOAT_ABS, [15.9], 'float', 15.9) + yield (rop.CAST_FLOAT_TO_INT, [-5.9], 'int', -5) yield (rop.CAST_FLOAT_TO_INT, [5.9], 'int', 5) - yield (rop.CAST_FLOAT_TO_INT, [-5.9], 'int', -5) yield (rop.CAST_INT_TO_FLOAT, [123], 'float', 123.0) yield (rop.CAST_INT_TO_FLOAT, [-123], 'float', -123.0) From pypy.commits at gmail.com Thu Jan 14 16:53:09 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Thu, 14 Jan 2016 13:53:09 -0800 (PST) Subject: [pypy-commit] pypy repeatlist_strategy: merge default Message-ID: <569818c5.e44bc20a.eb52f.ffff9d89@mx.google.com> Author: Vincent Legoll Branch: repeatlist_strategy Changeset: r81768:5de7752a044a Date: 2016-01-14 22:19 +0100 http://bitbucket.org/pypy/pypy/changeset/5de7752a044a/ Log: merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -110,3 +110,7 @@ short-running Python callbacks. (CFFI on CPython has a hack to achieve the same result.) This can also be seen as a bug fix: previously, thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -9,8 +9,8 @@ class Code(W_Root): """A code is a compiled version of some source code. Abstract base class.""" - _immutable_ = True hidden_applevel = False + _immutable_fields_ = ['co_name', 'fast_natural_arity', 'hidden_applevel'] # n >= 0 : arity # FLATPYCALL = 0x100 diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -56,11 +56,13 @@ class PyCode(eval.Code): "CPython-style code objects." - _immutable_ = True - _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", - "_args_as_cellvars[*]"] - + _immutable_fields_ = ["_signature", "co_argcount", "co_cellvars[*]", + "co_code", "co_consts_w[*]", "co_filename", + "co_firstlineno", "co_flags", "co_freevars[*]", + "co_lnotab", "co_names_w[*]", "co_nlocals", + "co_stacksize", "co_varnames[*]", + "_args_as_cellvars[*]", "w_globals?"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -84,6 +86,10 @@ self.co_name = name self.co_firstlineno = firstlineno self.co_lnotab = lnotab + # store the first globals object that the code object is run in in + # here. if a frame is run in that globals object, it does not need to + # store it at all + self.w_globals = None self.hidden_applevel = hidden_applevel self.magic = magic self._signature = cpython_code_signature(self) @@ -91,6 +97,14 @@ self._init_ready() self.new_code_hook() + def frame_stores_global(self, w_globals): + if self.w_globals is None: + self.w_globals = w_globals + return False + if self.w_globals is w_globals: + return False + return True + def new_code_hook(self): code_hook = self.space.fromcache(CodeHookCache)._code_hook if code_hook is not None: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -36,6 +36,7 @@ def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno + self.w_globals = pycode.w_globals class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -67,7 +68,6 @@ escaped = False # see mark_as_escaped() debugdata = None - w_globals = None pycode = None # code object executed by that frame locals_cells_stack_w = None # the list of all locals, cells and the valuestack valuestackdepth = 0 # number of items on valuestack @@ -90,8 +90,9 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.space = space - self.w_globals = w_globals self.pycode = code + if code.frame_stores_global(w_globals): + self.getorcreatedebug().w_globals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize @@ -116,6 +117,12 @@ self.debugdata = FrameDebugData(self.pycode) return self.debugdata + def get_w_globals(self): + debugdata = self.getdebug() + if debugdata is not None: + return debugdata.w_globals + return jit.promote(self.pycode).w_globals + def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -201,8 +208,9 @@ if flags & pycode.CO_NEWLOCALS: self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: - assert self.w_globals is not None - self.getorcreatedebug().w_locals = self.w_globals + w_globals = self.get_w_globals() + assert w_globals is not None + self.getorcreatedebug().w_locals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -449,7 +457,7 @@ w_blockstack, w_exc_value, # last_exception w_tb, # - self.w_globals, + self.get_w_globals(), w(self.last_instr), w(self.frame_finished_execution), w(f_lineno), @@ -658,6 +666,11 @@ def fget_getdictscope(self, space): return self.getdictscope() + def fget_w_globals(self, space): + # bit silly, but GetSetProperty passes a space + return self.get_w_globals() + + ### line numbers ### def fget_f_lineno(self, space): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,7 +837,7 @@ w_bases = self.popvalue() w_name = self.popvalue() w_metaclass = find_metaclass(self.space, w_bases, - w_methodsdict, self.w_globals, + w_methodsdict, self.get_w_globals(), self.space.wrap(self.get_builtin())) w_newclass = self.space.call_function(w_metaclass, w_name, w_bases, w_methodsdict) @@ -881,14 +881,14 @@ def STORE_GLOBAL(self, nameindex, next_instr): varname = self.getname_u(nameindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_globals, varname, w_newvalue) + self.space.setitem_str(self.get_w_globals(), varname, w_newvalue) def DELETE_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - self.space.delitem(self.w_globals, w_varname) + self.space.delitem(self.get_w_globals(), w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.getorcreatedebug().w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.get_w_globals(): varname = self.getname_u(nameindex) w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -898,7 +898,7 @@ self.LOAD_GLOBAL(nameindex, next_instr) # fall-back def _load_global(self, varname): - w_value = self.space.finditem_str(self.w_globals, varname) + w_value = self.space.finditem_str(self.get_w_globals(), varname) if w_value is None: # not in the globals, now look in the built-ins w_value = self.get_builtin().getdictvalue(self.space, varname) @@ -1029,7 +1029,7 @@ if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) - w_globals = self.w_globals + w_globals = self.get_w_globals() if w_flag is None: w_obj = space.call_function(w_import, w_modulename, w_globals, w_locals, w_fromlist) @@ -1237,7 +1237,7 @@ w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments) self.pushvalue(self.space.wrap(fn)) @@ -1249,7 +1249,7 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -34,6 +34,7 @@ import sys f = sys._getframe() assert f.f_globals is globals() + raises(TypeError, "f.f_globals = globals()") def test_f_builtins(self): import sys, __builtin__ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -772,7 +772,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), + f_globals = GetSetProperty(PyFrame.fget_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -93,7 +93,7 @@ if space.is_none(w_locals): w_locals = w_globals else: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): diff --git a/pypy/module/__builtin__/interp_inspect.py b/pypy/module/__builtin__/interp_inspect.py --- a/pypy/module/__builtin__/interp_inspect.py +++ b/pypy/module/__builtin__/interp_inspect.py @@ -2,7 +2,7 @@ def globals(space): "Return the dictionary containing the current scope's global variables." ec = space.getexecutioncontext() - return ec.gettopframe_nohidden().w_globals + return ec.gettopframe_nohidden().get_w_globals() def locals(space): """Return a dictionary containing the current scope's local variables. diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -93,7 +93,7 @@ Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = w_obj.strategy.__class__.__name__ + name = w_obj.get_strategy().__class__.__name__ elif isinstance(w_obj, W_ListObject): name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module +from pypy.module import _cffi_backend from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject from pypy.module._cffi_backend.lib_obj import W_LibObject @@ -27,8 +28,10 @@ version = rffi.cast(lltype.Signed, p[0]) if not (VERSION_MIN <= version <= VERSION_MAX): raise oefmt(space.w_ImportError, - "cffi extension module '%s' has unknown version %s", - name, hex(version)) + "cffi extension module '%s' uses an unknown version tag %s. " + "This module might need a more recent version of PyPy. " + "The current PyPy provides CFFI %s.", + name, hex(version), _cffi_backend.VERSION) src_ctx = rffi.cast(parse_c_type.PCTX, p[1]) ffi = W_FFIObject(space, src_ctx) diff --git a/pypy/module/_warnings/interp_warnings.py b/pypy/module/_warnings/interp_warnings.py --- a/pypy/module/_warnings/interp_warnings.py +++ b/pypy/module/_warnings/interp_warnings.py @@ -75,7 +75,7 @@ frame = ec.getnextframe_nohidden(frame) stacklevel -= 1 if frame: - w_globals = frame.w_globals + w_globals = frame.get_w_globals() lineno = frame.get_last_lineno() else: w_globals = space.sys.w_dict diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -30,7 +30,7 @@ currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtins = space.getitem(w_globals, space.wrap('__builtins__')) if not space.isinstance_w(w_builtins, space.w_dict): w_builtins = w_builtins.getdict(space) @@ -54,7 +54,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.w_globals) + return borrow_from(None, caller.get_w_globals()) @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -34,7 +34,7 @@ frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) - py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_globals = make_ref(space, frame.get_w_globals()) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @cpython_api([PyObject], lltype.Void, external=False) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -20,7 +20,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -526,7 +526,7 @@ log = self.run(f) loop, = log.loops_by_filename(self.filepath) call_ops = log.opnames(loop.ops_by_id('call')) - assert call_ops == ['force_token'] # it does not follow inlining + assert call_ops == ['guard_not_invalidated', 'force_token'] # it does not follow inlining # add_ops = log.opnames(loop.ops_by_id('add')) assert add_ops == ['int_add'] @@ -534,9 +534,10 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'int_lt', 'guard_true', + 'guard_not_invalidated', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,8 +72,6 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] @@ -200,6 +198,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ + guard_not_invalidated? i14 = force_token() i16 = force_token() """) @@ -222,7 +221,7 @@ loop, = log.loops_by_id('call') ops = log.opnames(loop.ops_by_id('call')) guards = [ops for ops in ops if ops.startswith('guard')] - assert guards == ["guard_no_overflow"] + assert guards == ["guard_not_invalidated", "guard_no_overflow"] def test_kwargs(self): # this is not a very precise test, could be improved @@ -281,6 +280,7 @@ assert log.result == 13000 loop0, = log.loops_by_id('g1') assert loop0.match_by_id('g1', """ + guard_not_invalidated? i20 = force_token() i22 = int_add_ovf(i8, 3) guard_no_overflow(descr=...) @@ -438,9 +438,6 @@ i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) - guard_value(p7, ConstPtr(ptr25), descr=...) - p26 = getfield_gc_r(p7, descr=) - guard_value(p26, ConstPtr(ptr27), descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) @@ -472,6 +469,7 @@ i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,9 +16,5 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p12 = getfield_gc_r(p10, descr=) - guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc_r(ConstPtr(p17), descr=) - guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -124,7 +124,7 @@ setfield_gc(ConstPtr(ptr39), i59, descr=...) i62 = int_lt(i61, 0) guard_false(i62, descr=...) - jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, p100, descr=...) + jump(..., descr=...) """) def test_mutate_class(self): @@ -183,7 +183,7 @@ setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(ptr42), descr=...) setfield_gc(ConstPtr(ptr69), p77, descr=...) - jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, p100, descr=...) + jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -145,9 +145,9 @@ i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) - i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) + i18 = force_token() i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) @@ -178,9 +178,9 @@ i16 = int_ge(i11, i12) guard_false(i16, descr=...) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) @@ -211,9 +211,9 @@ i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i95 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -23,12 +23,8 @@ i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) - p62 = getfield_gc_r(ConstPtr(ptr37), descr=) setfield_gc(p18, i61, descr=) - guard_value(p62, ConstPtr(ptr39), descr=...) guard_not_invalidated(descr=...) - p64 = getfield_gc_r(ConstPtr(ptr40), descr=) - guard_value(p64, ConstPtr(ptr42), descr=...) p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc_r(p14, descr=) diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -153,7 +153,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): d_new[_wrapkey(space, key)] = unwrap_cell(self.space, cell) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -42,6 +42,14 @@ class W_DictMultiObject(W_Root): + """ Abstract base class that does not store a strategy. """ + def get_strategy(self): + raise NotImplementedError("abstract method") + + def set_strategy(self, strategy): + raise NotImplementedError("abstract method") + + @staticmethod def allocate_and_init_instance(space, w_type=None, module=False, instance=False, strdict=False, @@ -52,6 +60,10 @@ # every module needs its own strategy, because the strategy stores # the version tag strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) + W_ModuleDictObject.__init__(w_obj, space, strategy, storage) + return w_obj elif space.config.objspace.std.withmapdict and instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) @@ -68,18 +80,17 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_obj = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_obj, space, strategy, storage) + w_obj = space.allocate_instance(W_DictObject, w_type) + W_DictObject.__init__(w_obj, space, strategy, storage) return w_obj - def __init__(self, space, strategy, storage): + def __init__(self, space, storage): self.space = space - self.strategy = strategy self.dstorage = storage def __repr__(self): """representation for debugging purposes""" - return "%s(%s)" % (self.__class__.__name__, self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.get_strategy()) def unwrap(w_dict, space): result = {} @@ -101,7 +112,7 @@ self.setitem(w_k, w_v) def setitem_str(self, key, w_value): - self.strategy.setitem_str(self, key, w_value) + self.get_strategy().setitem_str(self, key, w_value) @staticmethod def descr_new(space, w_dicttype, __args__): @@ -261,8 +272,9 @@ def nondescr_reversed_dict(self, space): """Not exposed directly to app-level, but via __pypy__.reversed_dict(). """ - if self.strategy.has_iterreversed: - it = self.strategy.iterreversed(self) + strategy = self.get_strategy() + if strategy.has_iterreversed: + it = strategy.iterreversed(self) return W_DictMultiIterKeysObject(space, it) else: # fall-back @@ -337,6 +349,37 @@ init_or_update(space, self, __args__, 'dict.update') +class W_DictObject(W_DictMultiObject): + """ a regular dict object """ + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.dstrategy = strategy + + def get_strategy(self): + return self.dstrategy + + def set_strategy(self, strategy): + self.dstrategy = strategy + + +class W_ModuleDictObject(W_DictMultiObject): + """ a dict object for a module, that is not expected to change. It stores + the strategy as a quasi-immutable field. """ + _immutable_fields_ = ['mstrategy?'] + + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.mstrategy = strategy + + def get_strategy(self): + return self.mstrategy + + def set_strategy(self, strategy): + self.mstrategy = strategy + + + + def _add_indirections(): dict_methods = "getitem getitem_str setitem setdefault \ popitem delitem clear \ @@ -347,7 +390,7 @@ def make_method(method): def f(self, *args): - return getattr(self.strategy, method)(self, *args) + return getattr(self.get_strategy(), method)(self, *args) f.func_name = method return f @@ -490,7 +533,7 @@ def clear(self, w_dict): strategy = self.space.fromcache(EmptyDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def listview_bytes(self, w_dict): @@ -556,32 +599,32 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(UnicodeDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_int_strategy(self, w_dict): strategy = self.space.fromcache(IntDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_identity_strategy(self, w_dict): from pypy.objspace.std.identitydict import IdentityDictStrategy strategy = self.space.fromcache(IdentityDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_object_strategy(self, w_dict): strategy = self.space.fromcache(ObjectDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def getitem(self, w_dict, w_key): @@ -662,7 +705,7 @@ if self.pos < self.len: result = getattr(self, 'next_' + TP + '_entry')() self.pos += 1 - if self.strategy is self.dictimplementation.strategy: + if self.strategy is self.dictimplementation.get_strategy(): return result # common case else: # waaa, obscure case: the strategy changed, but not the @@ -804,7 +847,7 @@ else: return # w_dict is completely empty, nothing to do count = w_dict.length() - 1 - w_updatedict.strategy.prepare_update(w_updatedict, count) + w_updatedict.get_strategy().prepare_update(w_updatedict, count) # If the strategy is still different, continue the slow way if not same_strategy(self, w_updatedict): for key, value, keyhash in iteritemsh: @@ -825,7 +868,7 @@ def same_strategy(self, w_otherdict): return (setitem_untyped is not None and - w_otherdict.strategy is self) + w_otherdict.get_strategy() is self) dictimpl.iterkeys = iterkeys dictimpl.itervalues = itervalues @@ -934,7 +977,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, value in d.iteritems(): d_new[self.wrap(key)] = value - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) # --------------- iterator interface ----------------- @@ -1178,7 +1221,7 @@ def update1_dict_dict(space, w_dict, w_data): - w_data.strategy.rev_update1_dict_dict(w_data, w_dict) + w_data.get_strategy().rev_update1_dict_dict(w_data, w_dict) def update1_pairs(space, w_dict, data_w): diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -18,7 +18,7 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage @@ -142,7 +142,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for i in range(len(keys)): d_new[self.wrap(keys[i])] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def switch_to_bytes_strategy(self, w_dict): @@ -152,7 +152,7 @@ d_new = strategy.unerase(storage) for i in range(len(keys)): d_new[keys[i]] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def view_as_kwargs(self, w_dict): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -6,7 +6,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.dictmultiobject import ( W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, - BaseValueIterator, BaseItemIterator, _never_equal_to_string + BaseValueIterator, BaseItemIterator, _never_equal_to_string, + W_DictObject, ) from pypy.objspace.std.typeobject import MutableCell @@ -407,7 +408,7 @@ strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) - w_dict = W_DictMultiObject(space, strategy, storage) + w_dict = W_DictObject(space, strategy, storage) flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag return w_dict @@ -422,8 +423,8 @@ # new dict. If the old dict was using the MapDictStrategy, we # have to force it now: otherwise it would remain an empty # shell that continues to delegate to 'self'. - if type(w_olddict.strategy) is MapDictStrategy: - w_olddict.strategy.switch_to_object_strategy(w_olddict) + if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag @@ -641,7 +642,7 @@ w_obj = self.unerase(w_dict.dstorage) strategy = self.space.fromcache(ObjectDictStrategy) dict_w = strategy.unerase(strategy.get_empty_storage()) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(dict_w) assert w_obj.getdict(self.space) is w_dict or w_obj._get_mapdict_map().terminator.w_cls is None materialize_r_dict(self.space, w_obj, dict_w) @@ -750,7 +751,7 @@ def next_key_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: @@ -772,7 +773,7 @@ def next_value_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: @@ -793,7 +794,7 @@ def next_item_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None, None if self.curr_map: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -18,7 +18,7 @@ from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject, wrapstr from pypy.objspace.std.complexobject import W_ComplexObject -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictMultiObject, W_DictObject from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject, setup_prebuilt, wrapint from pypy.objspace.std.iterobject import W_AbstractSeqIterObject, W_SeqIterObject @@ -439,7 +439,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_bytes() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_bytes() @@ -454,7 +454,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_unicode() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() @@ -467,7 +467,7 @@ def listview_int(self, w_obj): if type(w_obj) is W_ListObject: return w_obj.getitems_int() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_int() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() @@ -485,7 +485,7 @@ return None def view_as_kwargs(self, w_dict): - if type(w_dict) is W_DictMultiObject: + if type(w_dict) is W_DictObject: return w_dict.view_as_kwargs() return (None, None) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,7 +1,7 @@ import py from pypy.objspace.std.celldict import ModuleDictStrategy -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictObject, W_ModuleDictObject from pypy.objspace.std.test.test_dictmultiobject import ( BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, FakeString) @@ -14,7 +14,7 @@ def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version key = "a" @@ -23,30 +23,30 @@ v2 = strategy.version assert v1 is not v2 assert d.getitem(w_key) == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) == 1 d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 assert d.getitem(w_key) == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 2 d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 assert d.getitem(w_key) == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 3 d.delitem(w_key) v5 = strategy.version assert v5 is not v4 assert d.getitem(w_key) is None - assert d.strategy.getdictvalue_no_unwrapping(d, key) is None + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version x = object() @@ -134,7 +134,7 @@ py.test.skip("__repr__ doesn't work on appdirect") strategy = ModuleDictStrategy(cls.space) storage = strategy.get_empty_storage() - cls.w_d = W_DictMultiObject(cls.space, strategy, storage) + cls.w_d = W_ModuleDictObject(cls.space, strategy, storage) def test_popitem(self): import __pypy__ diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -2,14 +2,14 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - BytesDictStrategy, ObjectDictStrategy) + W_DictObject, BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): def test_empty(self): d = self.space.newdict() assert not self.space.is_true(d) - assert type(d.strategy) is not ObjectDictStrategy + assert type(d.get_strategy()) is not ObjectDictStrategy def test_nonempty(self): space = self.space @@ -1050,7 +1050,7 @@ return l def newlist_bytes(self, l): return l - DictObjectCls = W_DictMultiObject + DictObjectCls = W_DictObject def type(self, w_obj): if isinstance(w_obj, FakeString): return str @@ -1076,7 +1076,7 @@ return tuple(l) def newdict(self, module=False, instance=False): - return W_DictMultiObject.allocate_and_init_instance( + return W_DictObject.allocate_and_init_instance( self, module=module, instance=instance) def view_as_kwargs(self, w_d): @@ -1105,7 +1105,7 @@ w_float = float StringObjectCls = FakeString UnicodeObjectCls = FakeUnicode - w_dict = W_DictMultiObject + w_dict = W_DictObject iter = iter fixedview = list listview = list @@ -1149,8 +1149,8 @@ def get_impl(self): strategy = self.StrategyClass(self.fakespace) storage = strategy.get_empty_storage() - w_dict = self.fakespace.allocate_instance(W_DictMultiObject, None) - W_DictMultiObject.__init__(w_dict, self.fakespace, strategy, storage) + w_dict = self.fakespace.allocate_instance(W_DictObject, None) + W_DictObject.__init__(w_dict, self.fakespace, strategy, storage) return w_dict def fill_impl(self): @@ -1159,7 +1159,7 @@ def check_not_devolved(self): #XXX check if strategy changed!? - assert type(self.impl.strategy) is self.StrategyClass + assert type(self.impl.get_strategy()) is self.StrategyClass #assert self.impl.r_dict_content is None def test_popitem(self): @@ -1246,7 +1246,7 @@ for x in xrange(100): impl.setitem(self.fakespace.str_w(str(x)), x) impl.setitem(x, x) - assert type(impl.strategy) is ObjectDictStrategy + assert type(impl.get_strategy()) is ObjectDictStrategy def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names @@ -1308,7 +1308,7 @@ class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): BaseTestRDictImplementation.fill_impl(self) - self.impl.strategy.switch_to_object_strategy(self.impl) + self.impl.get_strategy().switch_to_object_strategy(self.impl) def check_not_devolved(self): pass @@ -1320,5 +1320,5 @@ def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is BytesDictStrategy + assert type(d.get_strategy()) is BytesDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -1,5 +1,5 @@ import py -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.kwargsdict import * space = FakeSpace() @@ -9,7 +9,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -23,7 +23,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -52,7 +52,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -69,11 +69,11 @@ def test_limit_size(): storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) for i in range(100): assert d.setitem_str("d%s" % i, 4) is None - assert d.strategy is not strategy - assert "BytesDictStrategy" == d.strategy.__class__.__name__ + assert d.get_strategy() is not strategy + assert "BytesDictStrategy" == d.get_strategy().__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() @@ -82,7 +82,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) w_l = d.w_keys() # does not crash def test_view_as_kwargs(): @@ -91,26 +91,27 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert (space.view_as_kwargs(d) == keys, values) strategy = EmptyDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert (space.view_as_kwargs(d) == [], []) def test_from_empty_to_kwargs(): strategy = EmptyKwargsDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) d.setitem_str("a", 3) - assert isinstance(d.strategy, KwargsDictStrategy) + assert isinstance(d.get_strategy(), KwargsDictStrategy) from pypy.objspace.std.test.test_dictmultiobject import BaseTestRDictImplementation, BaseTestDevolvedDictImplementation def get_impl(self): storage = strategy.erase(([], [])) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) + class TestKwargsDictImplementation(BaseTestRDictImplementation): StrategyClass = KwargsDictStrategy get_impl = get_impl diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.mapdict import * class Config: @@ -309,7 +309,7 @@ obj.setdictvalue(space, "c", 7) assert obj.storage == [50, 60, 70, 5, 6, 7] - class FakeDict(W_DictMultiObject): + class FakeDict(W_DictObject): def __init__(self, d): self.dstorage = d @@ -368,7 +368,7 @@ def devolve_dict(space, obj): w_d = obj.getdict(space) - w_d.strategy.switch_to_object_strategy(w_d) + w_d.get_strategy().switch_to_object_strategy(w_d) def test_get_setdictvalue_after_devolve(): cls = Class() @@ -1127,7 +1127,7 @@ def test_newdict_instance(): w_dict = space.newdict(instance=True) - assert type(w_dict.strategy) is MapDictStrategy + assert type(w_dict.get_strategy()) is MapDictStrategy class TestMapDictImplementationUsingnewdict(BaseTestRDictImplementation): StrategyClass = MapDictStrategy diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -478,12 +478,12 @@ def getdict(w_self, space): # returning a dict-proxy! from pypy.objspace.std.dictproxyobject import DictProxyStrategy - from pypy.objspace.std.dictmultiobject import W_DictMultiObject + from pypy.objspace.std.dictmultiobject import W_DictObject if w_self.lazyloaders: w_self._cleanup_() # force un-lazification strategy = space.fromcache(DictProxyStrategy) storage = strategy.erase(w_self) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) def is_heaptype(w_self): return w_self.flag_heaptype @@ -1139,7 +1139,7 @@ space = w_self.space caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_name = space.finditem(w_globals, space.wrap('__name__')) if w_name is not None: w_self.dict_w['__module__'] = w_name diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -63,7 +63,7 @@ for key, w_value in vars.items(): space.setitem(self.w_locals, space.wrap(key), w_value) if isinstance(code, str): - return space.eval(code, self.w_globals, self.w_locals) + return space.eval(code, self.get_w_globals(), self.w_locals) pyc = pycode.PyCode._from_code(space, code) return pyc.exec_host_bytecode(self.w_globals, self.w_locals) exec_ = eval @@ -248,7 +248,7 @@ #if filename.endswith("pyc"): # filename = filename[:-1] try: - space.exec_(str(source), frame.w_globals, w_locals, + space.exec_(str(source), frame.get_w_globals(), w_locals, filename=filename) except OperationError, e: if e.match(space, w_ExpectedException): diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -32,8 +32,8 @@ # 1. 'cached_infos' is a list listing all the infos that are # caching this descr # - # 2. we just did one setfield, which is delayed (and thus - # not synchronized). 'lazy_setfield' is the delayed + # 2. we just did one set(field/arrayitem), which is delayed (and thus + # not synchronized). '_lazy_set' is the delayed # ResOperation. In this state, 'cached_infos' contains # out-of-date information. More precisely, the field # value pending in the ResOperation is *not* visible in @@ -41,7 +41,7 @@ # self.cached_infos = [] self.cached_structs = [] - self._lazy_setfield = None + self._lazy_set = None def register_info(self, structop, info): # invariant: every struct or array ptr info, that is not virtual and @@ -53,27 +53,27 @@ def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): - assert self._lazy_setfield is None + assert self._lazy_set is None for i, info in enumerate(self.cached_infos): structbox = optimizer.get_box_replacement(self.cached_structs[i]) info.produce_short_preamble_ops(structbox, descr, index, optimizer, shortboxes) def possible_aliasing(self, optheap, opinfo): - # If lazy_setfield is set and contains a setfield on a different + # If lazy_set is set and contains a setfield on a different # structvalue, then we are annoyed, because it may point to either # the same or a different structure at runtime. # XXX constants? - return (self._lazy_setfield is not None + return (self._lazy_set is not None and (not optheap.getptrinfo( - self._lazy_setfield.getarg(0)).same_info(opinfo))) + self._lazy_set.getarg(0)).same_info(opinfo))) def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. structinfo = optheap.ensure_ptr_info_arg0(op) arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): - self.force_lazy_setfield(optheap, op.getdescr()) + self.force_lazy_set(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) cached_field = self._getfield(structinfo, op.getdescr(), optheap, False) if cached_field is not None: @@ -86,27 +86,27 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield - self._lazy_setfield = op + # common case: store the 'op' as lazy_set + self._lazy_set = op else: # this is the case where the pending setfield ends up # storing precisely the value that is already there, # as proved by 'cached_fields'. In this case, we don't - # need any _lazy_setfield: the heap value is already right. - # Note that this may reset to None a non-None lazy_setfield, + # need any _lazy_set: the heap value is already right. + # Note that this may reset to None a non-None lazy_set, # cancelling its previous effects with no side effect. # Now, we have to force the item in the short preamble self._getfield(structinfo, op.getdescr(), optheap) - self._lazy_setfield = None + self._lazy_set = None def getfield_from_cache(self, optheap, opinfo, descr): # Returns the up-to-date field's value, or None if not cached. if self.possible_aliasing(optheap, opinfo): - self.force_lazy_setfield(optheap, descr) - if self._lazy_setfield is not None: - op = self._lazy_setfield + self.force_lazy_set(optheap, descr) + if self._lazy_set is not None: + op = self._lazy_set return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) @@ -114,15 +114,15 @@ return res.get_box_replacement() return None - def force_lazy_setfield(self, optheap, descr, can_cache=True): - op = self._lazy_setfield + def force_lazy_set(self, optheap, descr, can_cache=True): + op = self._lazy_set if op is not None: - # This is the way _lazy_setfield is usually reset to None. + # This is the way _lazy_set is usually reset to None. # Now we clear _cached_fields, because actually doing the # setfield might impact any of the stored result (because of # possible aliasing). self.invalidate(descr) - self._lazy_setfield = None + self._lazy_set = None if optheap.postponed_op: for a in op.getarglist(): if a is optheap.postponed_op: @@ -250,7 +250,7 @@ def flush(self): self.cached_dict_reads.clear() self.corresponding_array_descrs.clear() - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.emit_postponed_op() def emit_postponed_op(self): @@ -326,7 +326,7 @@ return if op.is_guard(): self.optimizer.pendingfields = ( - self.force_lazy_setfields_and_arrayitems_for_guard()) + self.force_lazy_sets_for_guard()) return opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or # handled specially @@ -354,7 +354,7 @@ if not effectinfo.has_random_effects(): self.force_from_effectinfo(effectinfo) return - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.clean_caches() def optimize_CALL_I(self, op): @@ -432,7 +432,7 @@ # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) + self.force_lazy_set(fielddescr) for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: @@ -442,7 +442,7 @@ del self.cached_dict_reads[fielddescr] except KeyError: pass - self.force_lazy_setfield(fielddescr, can_cache=False) + self.force_lazy_set(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) if arraydescr in self.corresponding_array_descrs: @@ -453,16 +453,16 @@ pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) + self.force_lazy_set(vrefinfo.descr_forced) # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. - def force_lazy_setfield(self, descr, can_cache=True): + def force_lazy_set(self, descr, can_cache=True): try: cf = self.cached_fields[descr] except KeyError: return - cf.force_lazy_setfield(self, descr, can_cache) + cf.force_lazy_set(self, descr, can_cache) def force_lazy_setarrayitem(self, arraydescr, indexb=None, can_cache=True): try: @@ -471,35 +471,35 @@ return for idx, cf in submap.iteritems(): if indexb is None or indexb.contains(idx): - cf.force_lazy_setfield(self, None, can_cache) + cf.force_lazy_set(self, None, can_cache) - def force_all_lazy_setfields_and_arrayitems(self): + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for submap in self.cached_arrayitems.itervalues(): for index, cf in submap.iteritems(): - cf.force_lazy_setfield(self, None) + cf.force_lazy_set(self, None) - def force_lazy_setfields_and_arrayitems_for_guard(self): + def force_lazy_sets_for_guard(self): pendingfields = [] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue val = op.getarg(1) if self.optimizer.is_virtual(val): pendingfields.append(op) continue - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for descr, submap in self.cached_arrayitems.iteritems(): for index, cf in submap.iteritems(): - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue # the only really interesting case that we need to handle in the @@ -511,7 +511,7 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -528,6 +528,7 @@ if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): + assert not self.is_virtual() self._items = self._items + [None] * (index - len(self._items) + 1) self._items[index] = op if cf is not None: From pypy.commits at gmail.com Thu Jan 14 16:53:11 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Thu, 14 Jan 2016 13:53:11 -0800 (PST) Subject: [pypy-commit] pypy repeatlist_strategy: backout e2d983c38082, it was wrong, the failure comes from repeatlist strategy Message-ID: <569818c7.6953c20a.5d168.fffff7fb@mx.google.com> Author: Vincent Legoll Branch: repeatlist_strategy Changeset: r81769:23581fb2f3d5 Date: 2016-01-14 22:45 +0100 http://bitbucket.org/pypy/pypy/changeset/23581fb2f3d5/ Log: backout e2d983c38082, it was wrong, the failure comes from repeatlist strategy diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -76,12 +76,12 @@ def test_remove_mutating(self): collections = self.collections - d = collections.deque([MutatingCmp()]) class MutatingCmp(object): def __eq__(self, other): d.clear() return True + d = collections.deque([MutatingCmp()]) raises(IndexError, d.remove, 1) def test_remove_failing(self): From pypy.commits at gmail.com Thu Jan 14 17:11:01 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 14 Jan 2016 14:11:01 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: Bah Message-ID: <56981cf5.a453c20a.3d6b7.fffffdcf@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2583:1c5c52f0ee90 Date: 2016-01-14 23:10 +0100 http://bitbucket.org/cffi/cffi/changeset/1c5c52f0ee90/ Log: Bah diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -123,6 +123,9 @@ XXX we should also give a way to write errors to a buffer instead of to stderr. + + XXX if importing 'site' fails, CPython (any version) calls + exit(). Should we try to work around this behavior here? */ Py_InitializeEx(0); } diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -122,6 +122,10 @@ call it, an error message is also printed to stderr and the function returns zero/null. + Note that the CFFI module never calls ``exit()``, but CPython itself + contains code that calls ``exit()``, for example if importing + ``site`` fails. This may be worked around in the future. + * **ffi.set_source(c_module_name, c_code):** set the name of the module from Python's point of view. It also gives more C code which will be included in the generated C code. In trivial examples it From pypy.commits at gmail.com Thu Jan 14 18:35:39 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 14 Jan 2016 15:35:39 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Extract name computation out of FuncNode.__init__ Message-ID: <569830cb.e251c20a.edf1d.0fa0@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81770:ae4115e11c00 Date: 2016-01-13 00:09 +0000 http://bitbucket.org/pypy/pypy/changeset/ae4115e11c00/ Log: Extract name computation out of FuncNode.__init__ diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -818,17 +818,10 @@ self.globalcontainer = True self.T = T self.obj = obj - callable = getattr(obj, '_callable', None) - if (callable is not None and - getattr(callable, 'c_name', None) is not None): - self.name = forcename or obj._callable.c_name - elif (getattr(obj, 'external', None) == 'C' and - (not db.sandbox or not need_sandboxing(obj))): - self.name = forcename or self.basename() + if forcename: + self.name = forcename else: - self.name = (forcename or - db.namespace.uniquename('g_' + self.basename())) - + self.name = _select_name(db, obj) self.funcgen = select_function_code_generators(obj, db, self.name) if self.funcgen: argnames = self.funcgen.argnames() @@ -958,6 +951,17 @@ else: raise ValueError("don't know how to generate code for %r" % (fnobj,)) +def _select_name(db, obj): + try: + return obj._callable.c_name + except AttributeError: + pass + if (getattr(obj, 'external', None) == 'C' and + (not db.sandbox or not need_sandboxing(obj))): + return obj._name + return db.namespace.uniquename('g_' + obj._name) + + class ExtType_OpaqueNode(ContainerNode): nodekind = 'rpyopaque' From pypy.commits at gmail.com Thu Jan 14 18:35:41 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 14 Jan 2016 15:35:41 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Create factory function for creating FuncNodes Message-ID: <569830cd.ca061c0a.c5eb2.004c@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81771:54678c7d1ee3 Date: 2016-01-13 18:57 +0000 http://bitbucket.org/pypy/pypy/changeset/54678c7d1ee3/ Log: Create factory function for creating FuncNodes diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -813,16 +813,13 @@ # there not so many node of this kind, slots should not # be necessary - def __init__(self, db, T, obj, forcename=None): + def __init__(self, db, T, obj, ptrname): Node.__init__(self, db) self.globalcontainer = True self.T = T self.obj = obj - if forcename: - self.name = forcename - else: - self.name = _select_name(db, obj) - self.funcgen = select_function_code_generators(obj, db, self.name) + self.name = ptrname + self.funcgen = select_function_code_generators(obj, db, ptrname) if self.funcgen: argnames = self.funcgen.argnames() self.implementationtypename = db.gettype(T, argnames=argnames) @@ -905,6 +902,13 @@ del bodyiter funcgen.implementation_end() +def new_funcnode(db, T, obj, forcename=None): + if forcename: + name = forcename + else: + name = _select_name(db, obj) + return FuncNode(db, T, obj, name) + def sandbox_stub(fnobj, db): # unexpected external function for --sandbox translation: replace it # with a "Not Implemented" stub. @@ -1070,7 +1074,7 @@ Array: ArrayNode, GcArray: ArrayNode, FixedSizeArray: FixedSizeArrayNode, - FuncType: FuncNode, + FuncType: new_funcnode, OpaqueType: opaquenode_factory, llmemory._WeakRefType: weakrefnode_factory, llgroup.GroupType: GroupNode, From pypy.commits at gmail.com Thu Jan 14 18:35:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 14 Jan 2016 15:35:44 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Do the sandbox transform slightly earlier Message-ID: <569830d0.6a69c20a.b6a34.1545@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81773:98af64007405 Date: 2016-01-14 19:20 +0000 http://bitbucket.org/pypy/pypy/changeset/98af64007405/ Log: Do the sandbox transform slightly earlier diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -903,6 +903,20 @@ funcgen.implementation_end() def new_funcnode(db, T, obj, forcename=None): + sandbox = db.sandbox and need_sandboxing(obj) + if sandbox: + if hasattr(obj, 'graph') and sandbox != 'if_external': + graph = rsandbox.get_external_function_sandbox_graph( + obj, db.translator.rtyper) + obj.__dict__['graph'] = graph + obj.__dict__.pop('_safe_not_sandboxed', None) + obj.__dict__.pop('external', None) + elif getattr(obj, 'external', None) is not None: + obj.__dict__['graph'] = rsandbox.get_sandbox_stub( + obj, db.translator.rtyper) + obj.__dict__.pop('_safe_not_sandboxed', None) + obj.__dict__.pop('external', None) + if forcename: name = forcename else: @@ -919,16 +933,6 @@ return "if_external" def select_function_code_generators(fnobj, db, functionname): - sandbox = db.sandbox and need_sandboxing(fnobj) - if sandbox: - if hasattr(fnobj, 'graph') and sandbox != 'if_external': - graph = rsandbox.get_external_function_sandbox_graph( - fnobj, db.translator.rtyper) - fnobj.__dict__['graph'] = graph - elif getattr(fnobj, 'external', None) is not None: - fnobj.__dict__['graph'] = rsandbox.get_sandbox_stub( - fnobj, db.translator.rtyper) - if hasattr(fnobj, 'graph'): exception_policy = getattr(fnobj, 'exception_policy', None) return make_funcgen(fnobj.graph, db, exception_policy, functionname) @@ -947,8 +951,7 @@ return obj._callable.c_name except AttributeError: pass - if (getattr(obj, 'external', None) == 'C' and - (not db.sandbox or not need_sandboxing(obj))): + if getattr(obj, 'external', None) == 'C': return obj._name return db.namespace.uniquename('g_' + obj._name) From pypy.commits at gmail.com Thu Jan 14 18:35:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 14 Jan 2016 15:35:43 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Do the sandbox transform by hacking fnobj.__dict__ Message-ID: <569830cf.863f1c0a.ff9da.2bfa@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81772:352ff7c8975d Date: 2016-01-13 19:46 +0000 http://bitbucket.org/pypy/pypy/changeset/352ff7c8975d/ Log: Do the sandbox transform by hacking fnobj.__dict__ diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -909,20 +909,6 @@ name = _select_name(db, obj) return FuncNode(db, T, obj, name) -def sandbox_stub(fnobj, db): - # unexpected external function for --sandbox translation: replace it - # with a "Not Implemented" stub. - graph = rsandbox.get_sandbox_stub(fnobj, db.translator.rtyper) - return make_funcgen(graph, db) - -def sandbox_transform(fnobj, db): - # for --sandbox: replace a function like os_open_llimpl() with - # code that communicates with the external process to ask it to - # perform the operation. - graph = rsandbox.get_external_function_sandbox_graph( - fnobj, db.translator.rtyper) - return make_funcgen(graph, db) - def need_sandboxing(fnobj): if hasattr(fnobj, '_safe_not_sandboxed'): return not fnobj._safe_not_sandboxed @@ -936,13 +922,14 @@ sandbox = db.sandbox and need_sandboxing(fnobj) if sandbox: if hasattr(fnobj, 'graph') and sandbox != 'if_external': - # apply the sandbox transformation - return sandbox_transform(fnobj, db) + graph = rsandbox.get_external_function_sandbox_graph( + fnobj, db.translator.rtyper) + fnobj.__dict__['graph'] = graph elif getattr(fnobj, 'external', None) is not None: - return sandbox_stub(fnobj, db) + fnobj.__dict__['graph'] = rsandbox.get_sandbox_stub( + fnobj, db.translator.rtyper) + if hasattr(fnobj, 'graph'): - if db.sandbox: - assert getattr(fnobj, '_safe_not_sandboxed', True) exception_policy = getattr(fnobj, 'exception_policy', None) return make_funcgen(fnobj.graph, db, exception_policy, functionname) elif getattr(fnobj, 'external', None) is not None: From pypy.commits at gmail.com Thu Jan 14 23:00:29 2016 From: pypy.commits at gmail.com (mjacob) Date: Thu, 14 Jan 2016 20:00:29 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Explicitly set random_effects_on_gcobjs flag if constructing an external function pointer. Message-ID: <56986edd.2457c20a.fab7.3c4c@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81775:d426143d7ae9 Date: 2016-01-15 04:59 +0100 http://bitbucket.org/pypy/pypy/changeset/d426143d7ae9/ Log: Explicitly set random_effects_on_gcobjs flag if constructing an external function pointer. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1738,6 +1738,7 @@ def extfunc(name, args, result, compilation_info): func_type = lltype.FuncType(args, result) return lltype.functionptr(func_type, name, external='C', calling_conv='c', + random_effects_on_gcobjs=False, compilation_info=compilation_info) eci = ExternalCompilationInfo() From pypy.commits at gmail.com Thu Jan 14 23:00:26 2016 From: pypy.commits at gmail.com (mjacob) Date: Thu, 14 Jan 2016 20:00:26 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <56986eda.a85fc20a.6917f.40c7@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81774:92a85579bb26 Date: 2016-01-15 04:29 +0100 http://bitbucket.org/pypy/pypy/changeset/92a85579bb26/ Log: hg merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -110,3 +110,7 @@ short-running Python callbacks. (CFFI on CPython has a hack to achieve the same result.) This can also be seen as a bug fix: previously, thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -9,8 +9,8 @@ class Code(W_Root): """A code is a compiled version of some source code. Abstract base class.""" - _immutable_ = True hidden_applevel = False + _immutable_fields_ = ['co_name', 'fast_natural_arity', 'hidden_applevel'] # n >= 0 : arity # FLATPYCALL = 0x100 diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -56,11 +56,13 @@ class PyCode(eval.Code): "CPython-style code objects." - _immutable_ = True - _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", - "_args_as_cellvars[*]"] - + _immutable_fields_ = ["_signature", "co_argcount", "co_cellvars[*]", + "co_code", "co_consts_w[*]", "co_filename", + "co_firstlineno", "co_flags", "co_freevars[*]", + "co_lnotab", "co_names_w[*]", "co_nlocals", + "co_stacksize", "co_varnames[*]", + "_args_as_cellvars[*]", "w_globals?"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -84,6 +86,10 @@ self.co_name = name self.co_firstlineno = firstlineno self.co_lnotab = lnotab + # store the first globals object that the code object is run in in + # here. if a frame is run in that globals object, it does not need to + # store it at all + self.w_globals = None self.hidden_applevel = hidden_applevel self.magic = magic self._signature = cpython_code_signature(self) @@ -91,6 +97,14 @@ self._init_ready() self.new_code_hook() + def frame_stores_global(self, w_globals): + if self.w_globals is None: + self.w_globals = w_globals + return False + if self.w_globals is w_globals: + return False + return True + def new_code_hook(self): code_hook = self.space.fromcache(CodeHookCache)._code_hook if code_hook is not None: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -36,6 +36,7 @@ def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno + self.w_globals = pycode.w_globals class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -67,7 +68,6 @@ escaped = False # see mark_as_escaped() debugdata = None - w_globals = None pycode = None # code object executed by that frame locals_cells_stack_w = None # the list of all locals, cells and the valuestack valuestackdepth = 0 # number of items on valuestack @@ -90,8 +90,9 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.space = space - self.w_globals = w_globals self.pycode = code + if code.frame_stores_global(w_globals): + self.getorcreatedebug().w_globals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize @@ -116,6 +117,12 @@ self.debugdata = FrameDebugData(self.pycode) return self.debugdata + def get_w_globals(self): + debugdata = self.getdebug() + if debugdata is not None: + return debugdata.w_globals + return jit.promote(self.pycode).w_globals + def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -201,8 +208,9 @@ if flags & pycode.CO_NEWLOCALS: self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: - assert self.w_globals is not None - self.getorcreatedebug().w_locals = self.w_globals + w_globals = self.get_w_globals() + assert w_globals is not None + self.getorcreatedebug().w_locals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -449,7 +457,7 @@ w_blockstack, w_exc_value, # last_exception w_tb, # - self.w_globals, + self.get_w_globals(), w(self.last_instr), w(self.frame_finished_execution), w(f_lineno), @@ -658,6 +666,11 @@ def fget_getdictscope(self, space): return self.getdictscope() + def fget_w_globals(self, space): + # bit silly, but GetSetProperty passes a space + return self.get_w_globals() + + ### line numbers ### def fget_f_lineno(self, space): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,7 +837,7 @@ w_bases = self.popvalue() w_name = self.popvalue() w_metaclass = find_metaclass(self.space, w_bases, - w_methodsdict, self.w_globals, + w_methodsdict, self.get_w_globals(), self.space.wrap(self.get_builtin())) w_newclass = self.space.call_function(w_metaclass, w_name, w_bases, w_methodsdict) @@ -881,14 +881,14 @@ def STORE_GLOBAL(self, nameindex, next_instr): varname = self.getname_u(nameindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_globals, varname, w_newvalue) + self.space.setitem_str(self.get_w_globals(), varname, w_newvalue) def DELETE_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - self.space.delitem(self.w_globals, w_varname) + self.space.delitem(self.get_w_globals(), w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.getorcreatedebug().w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.get_w_globals(): varname = self.getname_u(nameindex) w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -898,7 +898,7 @@ self.LOAD_GLOBAL(nameindex, next_instr) # fall-back def _load_global(self, varname): - w_value = self.space.finditem_str(self.w_globals, varname) + w_value = self.space.finditem_str(self.get_w_globals(), varname) if w_value is None: # not in the globals, now look in the built-ins w_value = self.get_builtin().getdictvalue(self.space, varname) @@ -1029,7 +1029,7 @@ if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) - w_globals = self.w_globals + w_globals = self.get_w_globals() if w_flag is None: w_obj = space.call_function(w_import, w_modulename, w_globals, w_locals, w_fromlist) @@ -1237,7 +1237,7 @@ w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments) self.pushvalue(self.space.wrap(fn)) @@ -1249,7 +1249,7 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -34,6 +34,7 @@ import sys f = sys._getframe() assert f.f_globals is globals() + raises(TypeError, "f.f_globals = globals()") def test_f_builtins(self): import sys, __builtin__ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -772,7 +772,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), + f_globals = GetSetProperty(PyFrame.fget_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -93,7 +93,7 @@ if space.is_none(w_locals): w_locals = w_globals else: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): diff --git a/pypy/module/__builtin__/interp_inspect.py b/pypy/module/__builtin__/interp_inspect.py --- a/pypy/module/__builtin__/interp_inspect.py +++ b/pypy/module/__builtin__/interp_inspect.py @@ -2,7 +2,7 @@ def globals(space): "Return the dictionary containing the current scope's global variables." ec = space.getexecutioncontext() - return ec.gettopframe_nohidden().w_globals + return ec.gettopframe_nohidden().get_w_globals() def locals(space): """Return a dictionary containing the current scope's local variables. diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -93,7 +93,7 @@ Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = w_obj.strategy.__class__.__name__ + name = w_obj.get_strategy().__class__.__name__ elif isinstance(w_obj, W_ListObject): name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module +from pypy.module import _cffi_backend from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject from pypy.module._cffi_backend.lib_obj import W_LibObject @@ -27,8 +28,10 @@ version = rffi.cast(lltype.Signed, p[0]) if not (VERSION_MIN <= version <= VERSION_MAX): raise oefmt(space.w_ImportError, - "cffi extension module '%s' has unknown version %s", - name, hex(version)) + "cffi extension module '%s' uses an unknown version tag %s. " + "This module might need a more recent version of PyPy. " + "The current PyPy provides CFFI %s.", + name, hex(version), _cffi_backend.VERSION) src_ctx = rffi.cast(parse_c_type.PCTX, p[1]) ffi = W_FFIObject(space, src_ctx) diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff --git a/pypy/module/_warnings/interp_warnings.py b/pypy/module/_warnings/interp_warnings.py --- a/pypy/module/_warnings/interp_warnings.py +++ b/pypy/module/_warnings/interp_warnings.py @@ -75,7 +75,7 @@ frame = ec.getnextframe_nohidden(frame) stacklevel -= 1 if frame: - w_globals = frame.w_globals + w_globals = frame.get_w_globals() lineno = frame.get_last_lineno() else: w_globals = space.sys.w_dict diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -30,7 +30,7 @@ currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtins = space.getitem(w_globals, space.wrap('__builtins__')) if not space.isinstance_w(w_builtins, space.w_dict): w_builtins = w_builtins.getdict(space) @@ -54,7 +54,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.w_globals) + return borrow_from(None, caller.get_w_globals()) @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -34,7 +34,7 @@ frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) - py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_globals = make_ref(space, frame.get_w_globals()) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @cpython_api([PyObject], lltype.Void, external=False) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -20,7 +20,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -526,7 +526,7 @@ log = self.run(f) loop, = log.loops_by_filename(self.filepath) call_ops = log.opnames(loop.ops_by_id('call')) - assert call_ops == ['force_token'] # it does not follow inlining + assert call_ops == ['guard_not_invalidated', 'force_token'] # it does not follow inlining # add_ops = log.opnames(loop.ops_by_id('add')) assert add_ops == ['int_add'] @@ -534,9 +534,10 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'int_lt', 'guard_true', + 'guard_not_invalidated', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,8 +72,6 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] @@ -200,6 +198,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ + guard_not_invalidated? i14 = force_token() i16 = force_token() """) @@ -222,7 +221,7 @@ loop, = log.loops_by_id('call') ops = log.opnames(loop.ops_by_id('call')) guards = [ops for ops in ops if ops.startswith('guard')] - assert guards == ["guard_no_overflow"] + assert guards == ["guard_not_invalidated", "guard_no_overflow"] def test_kwargs(self): # this is not a very precise test, could be improved @@ -281,6 +280,7 @@ assert log.result == 13000 loop0, = log.loops_by_id('g1') assert loop0.match_by_id('g1', """ + guard_not_invalidated? i20 = force_token() i22 = int_add_ovf(i8, 3) guard_no_overflow(descr=...) @@ -438,9 +438,6 @@ i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) - guard_value(p7, ConstPtr(ptr25), descr=...) - p26 = getfield_gc_r(p7, descr=) - guard_value(p26, ConstPtr(ptr27), descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) @@ -472,6 +469,7 @@ i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,9 +16,5 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p12 = getfield_gc_r(p10, descr=) - guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc_r(ConstPtr(p17), descr=) - guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -124,7 +124,7 @@ setfield_gc(ConstPtr(ptr39), i59, descr=...) i62 = int_lt(i61, 0) guard_false(i62, descr=...) - jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, p100, descr=...) + jump(..., descr=...) """) def test_mutate_class(self): @@ -183,7 +183,7 @@ setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(ptr42), descr=...) setfield_gc(ConstPtr(ptr69), p77, descr=...) - jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, p100, descr=...) + jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -145,9 +145,9 @@ i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) - i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) + i18 = force_token() i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) @@ -178,9 +178,9 @@ i16 = int_ge(i11, i12) guard_false(i16, descr=...) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) @@ -211,9 +211,9 @@ i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i95 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -23,12 +23,8 @@ i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) - p62 = getfield_gc_r(ConstPtr(ptr37), descr=) setfield_gc(p18, i61, descr=) - guard_value(p62, ConstPtr(ptr39), descr=...) guard_not_invalidated(descr=...) - p64 = getfield_gc_r(ConstPtr(ptr40), descr=) - guard_value(p64, ConstPtr(ptr42), descr=...) p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc_r(p14, descr=) diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -153,7 +153,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): d_new[_wrapkey(space, key)] = unwrap_cell(self.space, cell) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -42,6 +42,14 @@ class W_DictMultiObject(W_Root): + """ Abstract base class that does not store a strategy. """ + def get_strategy(self): + raise NotImplementedError("abstract method") + + def set_strategy(self, strategy): + raise NotImplementedError("abstract method") + + @staticmethod def allocate_and_init_instance(space, w_type=None, module=False, instance=False, strdict=False, @@ -52,6 +60,10 @@ # every module needs its own strategy, because the strategy stores # the version tag strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) + W_ModuleDictObject.__init__(w_obj, space, strategy, storage) + return w_obj elif space.config.objspace.std.withmapdict and instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) @@ -68,18 +80,17 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_obj = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_obj, space, strategy, storage) + w_obj = space.allocate_instance(W_DictObject, w_type) + W_DictObject.__init__(w_obj, space, strategy, storage) return w_obj - def __init__(self, space, strategy, storage): + def __init__(self, space, storage): self.space = space - self.strategy = strategy self.dstorage = storage def __repr__(self): """representation for debugging purposes""" - return "%s(%s)" % (self.__class__.__name__, self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.get_strategy()) def unwrap(w_dict, space): result = {} @@ -101,7 +112,7 @@ self.setitem(w_k, w_v) def setitem_str(self, key, w_value): - self.strategy.setitem_str(self, key, w_value) + self.get_strategy().setitem_str(self, key, w_value) @staticmethod def descr_new(space, w_dicttype, __args__): @@ -261,8 +272,9 @@ def nondescr_reversed_dict(self, space): """Not exposed directly to app-level, but via __pypy__.reversed_dict(). """ - if self.strategy.has_iterreversed: - it = self.strategy.iterreversed(self) + strategy = self.get_strategy() + if strategy.has_iterreversed: + it = strategy.iterreversed(self) return W_DictMultiIterKeysObject(space, it) else: # fall-back @@ -337,6 +349,37 @@ init_or_update(space, self, __args__, 'dict.update') +class W_DictObject(W_DictMultiObject): + """ a regular dict object """ + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.dstrategy = strategy + + def get_strategy(self): + return self.dstrategy + + def set_strategy(self, strategy): + self.dstrategy = strategy + + +class W_ModuleDictObject(W_DictMultiObject): + """ a dict object for a module, that is not expected to change. It stores + the strategy as a quasi-immutable field. """ + _immutable_fields_ = ['mstrategy?'] + + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.mstrategy = strategy + + def get_strategy(self): + return self.mstrategy + + def set_strategy(self, strategy): + self.mstrategy = strategy + + + + def _add_indirections(): dict_methods = "getitem getitem_str setitem setdefault \ popitem delitem clear \ @@ -347,7 +390,7 @@ def make_method(method): def f(self, *args): - return getattr(self.strategy, method)(self, *args) + return getattr(self.get_strategy(), method)(self, *args) f.func_name = method return f @@ -490,7 +533,7 @@ def clear(self, w_dict): strategy = self.space.fromcache(EmptyDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def listview_bytes(self, w_dict): @@ -556,32 +599,32 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(UnicodeDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_int_strategy(self, w_dict): strategy = self.space.fromcache(IntDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_identity_strategy(self, w_dict): from pypy.objspace.std.identitydict import IdentityDictStrategy strategy = self.space.fromcache(IdentityDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_object_strategy(self, w_dict): strategy = self.space.fromcache(ObjectDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def getitem(self, w_dict, w_key): @@ -662,7 +705,7 @@ if self.pos < self.len: result = getattr(self, 'next_' + TP + '_entry')() self.pos += 1 - if self.strategy is self.dictimplementation.strategy: + if self.strategy is self.dictimplementation.get_strategy(): return result # common case else: # waaa, obscure case: the strategy changed, but not the @@ -804,7 +847,7 @@ else: return # w_dict is completely empty, nothing to do count = w_dict.length() - 1 - w_updatedict.strategy.prepare_update(w_updatedict, count) + w_updatedict.get_strategy().prepare_update(w_updatedict, count) # If the strategy is still different, continue the slow way if not same_strategy(self, w_updatedict): for key, value, keyhash in iteritemsh: @@ -825,7 +868,7 @@ def same_strategy(self, w_otherdict): return (setitem_untyped is not None and - w_otherdict.strategy is self) + w_otherdict.get_strategy() is self) dictimpl.iterkeys = iterkeys dictimpl.itervalues = itervalues @@ -934,7 +977,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, value in d.iteritems(): d_new[self.wrap(key)] = value - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) # --------------- iterator interface ----------------- @@ -1178,7 +1221,7 @@ def update1_dict_dict(space, w_dict, w_data): - w_data.strategy.rev_update1_dict_dict(w_data, w_dict) + w_data.get_strategy().rev_update1_dict_dict(w_data, w_dict) def update1_pairs(space, w_dict, data_w): diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -18,7 +18,7 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage @@ -142,7 +142,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for i in range(len(keys)): d_new[self.wrap(keys[i])] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def switch_to_bytes_strategy(self, w_dict): @@ -152,7 +152,7 @@ d_new = strategy.unerase(storage) for i in range(len(keys)): d_new[keys[i]] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def view_as_kwargs(self, w_dict): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -6,7 +6,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.dictmultiobject import ( W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, - BaseValueIterator, BaseItemIterator, _never_equal_to_string + BaseValueIterator, BaseItemIterator, _never_equal_to_string, + W_DictObject, ) from pypy.objspace.std.typeobject import MutableCell @@ -407,7 +408,7 @@ strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) - w_dict = W_DictMultiObject(space, strategy, storage) + w_dict = W_DictObject(space, strategy, storage) flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag return w_dict @@ -422,8 +423,8 @@ # new dict. If the old dict was using the MapDictStrategy, we # have to force it now: otherwise it would remain an empty # shell that continues to delegate to 'self'. - if type(w_olddict.strategy) is MapDictStrategy: - w_olddict.strategy.switch_to_object_strategy(w_olddict) + if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) assert flag @@ -641,7 +642,7 @@ w_obj = self.unerase(w_dict.dstorage) strategy = self.space.fromcache(ObjectDictStrategy) dict_w = strategy.unerase(strategy.get_empty_storage()) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(dict_w) assert w_obj.getdict(self.space) is w_dict or w_obj._get_mapdict_map().terminator.w_cls is None materialize_r_dict(self.space, w_obj, dict_w) @@ -750,7 +751,7 @@ def next_key_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: @@ -772,7 +773,7 @@ def next_value_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: @@ -793,7 +794,7 @@ def next_item_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None, None if self.curr_map: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -18,7 +18,7 @@ from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject, wrapstr from pypy.objspace.std.complexobject import W_ComplexObject -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictMultiObject, W_DictObject from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject, setup_prebuilt, wrapint from pypy.objspace.std.iterobject import W_AbstractSeqIterObject, W_SeqIterObject @@ -439,7 +439,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_bytes() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_bytes() @@ -454,7 +454,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_unicode() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() @@ -467,7 +467,7 @@ def listview_int(self, w_obj): if type(w_obj) is W_ListObject: return w_obj.getitems_int() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_int() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() @@ -485,7 +485,7 @@ return None def view_as_kwargs(self, w_dict): - if type(w_dict) is W_DictMultiObject: + if type(w_dict) is W_DictObject: return w_dict.view_as_kwargs() return (None, None) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -1,7 +1,7 @@ import py from pypy.objspace.std.celldict import ModuleDictStrategy -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictObject, W_ModuleDictObject from pypy.objspace.std.test.test_dictmultiobject import ( BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, FakeString) @@ -14,7 +14,7 @@ def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version key = "a" @@ -23,30 +23,30 @@ v2 = strategy.version assert v1 is not v2 assert d.getitem(w_key) == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) == 1 d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 assert d.getitem(w_key) == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 2 d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 assert d.getitem(w_key) == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 3 d.delitem(w_key) v5 = strategy.version assert v5 is not v4 assert d.getitem(w_key) is None - assert d.strategy.getdictvalue_no_unwrapping(d, key) is None + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version x = object() @@ -134,7 +134,7 @@ py.test.skip("__repr__ doesn't work on appdirect") strategy = ModuleDictStrategy(cls.space) storage = strategy.get_empty_storage() - cls.w_d = W_DictMultiObject(cls.space, strategy, storage) + cls.w_d = W_ModuleDictObject(cls.space, strategy, storage) def test_popitem(self): import __pypy__ diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -2,14 +2,14 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - BytesDictStrategy, ObjectDictStrategy) + W_DictObject, BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): def test_empty(self): d = self.space.newdict() assert not self.space.is_true(d) - assert type(d.strategy) is not ObjectDictStrategy + assert type(d.get_strategy()) is not ObjectDictStrategy def test_nonempty(self): space = self.space @@ -1050,7 +1050,7 @@ return l def newlist_bytes(self, l): return l - DictObjectCls = W_DictMultiObject + DictObjectCls = W_DictObject def type(self, w_obj): if isinstance(w_obj, FakeString): return str @@ -1076,7 +1076,7 @@ return tuple(l) def newdict(self, module=False, instance=False): - return W_DictMultiObject.allocate_and_init_instance( + return W_DictObject.allocate_and_init_instance( self, module=module, instance=instance) def view_as_kwargs(self, w_d): @@ -1105,7 +1105,7 @@ w_float = float StringObjectCls = FakeString UnicodeObjectCls = FakeUnicode - w_dict = W_DictMultiObject + w_dict = W_DictObject iter = iter fixedview = list listview = list @@ -1149,8 +1149,8 @@ def get_impl(self): strategy = self.StrategyClass(self.fakespace) storage = strategy.get_empty_storage() - w_dict = self.fakespace.allocate_instance(W_DictMultiObject, None) - W_DictMultiObject.__init__(w_dict, self.fakespace, strategy, storage) + w_dict = self.fakespace.allocate_instance(W_DictObject, None) + W_DictObject.__init__(w_dict, self.fakespace, strategy, storage) return w_dict def fill_impl(self): @@ -1159,7 +1159,7 @@ def check_not_devolved(self): #XXX check if strategy changed!? - assert type(self.impl.strategy) is self.StrategyClass + assert type(self.impl.get_strategy()) is self.StrategyClass #assert self.impl.r_dict_content is None def test_popitem(self): @@ -1246,7 +1246,7 @@ for x in xrange(100): impl.setitem(self.fakespace.str_w(str(x)), x) impl.setitem(x, x) - assert type(impl.strategy) is ObjectDictStrategy + assert type(impl.get_strategy()) is ObjectDictStrategy def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names @@ -1308,7 +1308,7 @@ class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): BaseTestRDictImplementation.fill_impl(self) - self.impl.strategy.switch_to_object_strategy(self.impl) + self.impl.get_strategy().switch_to_object_strategy(self.impl) def check_not_devolved(self): pass @@ -1320,5 +1320,5 @@ def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is BytesDictStrategy + assert type(d.get_strategy()) is BytesDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -1,5 +1,5 @@ import py -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.kwargsdict import * space = FakeSpace() @@ -9,7 +9,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -23,7 +23,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -52,7 +52,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -69,11 +69,11 @@ def test_limit_size(): storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) for i in range(100): assert d.setitem_str("d%s" % i, 4) is None - assert d.strategy is not strategy - assert "BytesDictStrategy" == d.strategy.__class__.__name__ + assert d.get_strategy() is not strategy + assert "BytesDictStrategy" == d.get_strategy().__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() @@ -82,7 +82,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) w_l = d.w_keys() # does not crash def test_view_as_kwargs(): @@ -91,26 +91,27 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert (space.view_as_kwargs(d) == keys, values) strategy = EmptyDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert (space.view_as_kwargs(d) == [], []) def test_from_empty_to_kwargs(): strategy = EmptyKwargsDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) d.setitem_str("a", 3) - assert isinstance(d.strategy, KwargsDictStrategy) + assert isinstance(d.get_strategy(), KwargsDictStrategy) from pypy.objspace.std.test.test_dictmultiobject import BaseTestRDictImplementation, BaseTestDevolvedDictImplementation def get_impl(self): storage = strategy.erase(([], [])) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) + class TestKwargsDictImplementation(BaseTestRDictImplementation): StrategyClass = KwargsDictStrategy get_impl = get_impl diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -1,4 +1,4 @@ -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.mapdict import * class Config: @@ -309,7 +309,7 @@ obj.setdictvalue(space, "c", 7) assert obj.storage == [50, 60, 70, 5, 6, 7] - class FakeDict(W_DictMultiObject): + class FakeDict(W_DictObject): def __init__(self, d): self.dstorage = d @@ -368,7 +368,7 @@ def devolve_dict(space, obj): w_d = obj.getdict(space) - w_d.strategy.switch_to_object_strategy(w_d) + w_d.get_strategy().switch_to_object_strategy(w_d) def test_get_setdictvalue_after_devolve(): cls = Class() @@ -1127,7 +1127,7 @@ def test_newdict_instance(): w_dict = space.newdict(instance=True) - assert type(w_dict.strategy) is MapDictStrategy + assert type(w_dict.get_strategy()) is MapDictStrategy class TestMapDictImplementationUsingnewdict(BaseTestRDictImplementation): StrategyClass = MapDictStrategy diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -478,12 +478,12 @@ def getdict(w_self, space): # returning a dict-proxy! from pypy.objspace.std.dictproxyobject import DictProxyStrategy - from pypy.objspace.std.dictmultiobject import W_DictMultiObject + from pypy.objspace.std.dictmultiobject import W_DictObject if w_self.lazyloaders: w_self._cleanup_() # force un-lazification strategy = space.fromcache(DictProxyStrategy) storage = strategy.erase(w_self) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) def is_heaptype(w_self): return w_self.flag_heaptype @@ -1139,7 +1139,7 @@ space = w_self.space caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_name = space.finditem(w_globals, space.wrap('__name__')) if w_name is not None: w_self.dict_w['__module__'] = w_name diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -63,7 +63,7 @@ for key, w_value in vars.items(): space.setitem(self.w_locals, space.wrap(key), w_value) if isinstance(code, str): - return space.eval(code, self.w_globals, self.w_locals) + return space.eval(code, self.get_w_globals(), self.w_locals) pyc = pycode.PyCode._from_code(space, code) return pyc.exec_host_bytecode(self.w_globals, self.w_locals) exec_ = eval @@ -248,7 +248,7 @@ #if filename.endswith("pyc"): # filename = filename[:-1] try: - space.exec_(str(source), frame.w_globals, w_locals, + space.exec_(str(source), frame.get_w_globals(), w_locals, filename=filename) except OperationError, e: if e.match(space, w_ExpectedException): diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -331,11 +331,8 @@ class RandomEffectsAnalyzer(BoolGraphAnalyzer): def analyze_external_call(self, funcobj, seen=None): - try: - if funcobj.random_effects_on_gcobjs: - return True - except AttributeError: - return True # better safe than sorry + if funcobj.random_effects_on_gcobjs: + return True return super(RandomEffectsAnalyzer, self).analyze_external_call( funcobj, seen) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -32,8 +32,8 @@ # 1. 'cached_infos' is a list listing all the infos that are # caching this descr # - # 2. we just did one setfield, which is delayed (and thus - # not synchronized). 'lazy_setfield' is the delayed + # 2. we just did one set(field/arrayitem), which is delayed (and thus + # not synchronized). '_lazy_set' is the delayed # ResOperation. In this state, 'cached_infos' contains # out-of-date information. More precisely, the field # value pending in the ResOperation is *not* visible in @@ -41,7 +41,7 @@ # self.cached_infos = [] self.cached_structs = [] - self._lazy_setfield = None + self._lazy_set = None def register_info(self, structop, info): # invariant: every struct or array ptr info, that is not virtual and @@ -53,27 +53,27 @@ def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): - assert self._lazy_setfield is None + assert self._lazy_set is None for i, info in enumerate(self.cached_infos): structbox = optimizer.get_box_replacement(self.cached_structs[i]) info.produce_short_preamble_ops(structbox, descr, index, optimizer, shortboxes) def possible_aliasing(self, optheap, opinfo): - # If lazy_setfield is set and contains a setfield on a different + # If lazy_set is set and contains a setfield on a different # structvalue, then we are annoyed, because it may point to either # the same or a different structure at runtime. # XXX constants? - return (self._lazy_setfield is not None + return (self._lazy_set is not None and (not optheap.getptrinfo( - self._lazy_setfield.getarg(0)).same_info(opinfo))) + self._lazy_set.getarg(0)).same_info(opinfo))) def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. structinfo = optheap.ensure_ptr_info_arg0(op) arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): - self.force_lazy_setfield(optheap, op.getdescr()) + self.force_lazy_set(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) cached_field = self._getfield(structinfo, op.getdescr(), optheap, False) if cached_field is not None: @@ -86,27 +86,27 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield - self._lazy_setfield = op + # common case: store the 'op' as lazy_set + self._lazy_set = op else: # this is the case where the pending setfield ends up # storing precisely the value that is already there, # as proved by 'cached_fields'. In this case, we don't - # need any _lazy_setfield: the heap value is already right. - # Note that this may reset to None a non-None lazy_setfield, + # need any _lazy_set: the heap value is already right. + # Note that this may reset to None a non-None lazy_set, # cancelling its previous effects with no side effect. # Now, we have to force the item in the short preamble self._getfield(structinfo, op.getdescr(), optheap) - self._lazy_setfield = None + self._lazy_set = None def getfield_from_cache(self, optheap, opinfo, descr): # Returns the up-to-date field's value, or None if not cached. if self.possible_aliasing(optheap, opinfo): - self.force_lazy_setfield(optheap, descr) - if self._lazy_setfield is not None: - op = self._lazy_setfield + self.force_lazy_set(optheap, descr) + if self._lazy_set is not None: + op = self._lazy_set return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) @@ -114,15 +114,15 @@ return res.get_box_replacement() return None - def force_lazy_setfield(self, optheap, descr, can_cache=True): - op = self._lazy_setfield + def force_lazy_set(self, optheap, descr, can_cache=True): + op = self._lazy_set if op is not None: - # This is the way _lazy_setfield is usually reset to None. + # This is the way _lazy_set is usually reset to None. # Now we clear _cached_fields, because actually doing the # setfield might impact any of the stored result (because of # possible aliasing). self.invalidate(descr) - self._lazy_setfield = None + self._lazy_set = None if optheap.postponed_op: for a in op.getarglist(): if a is optheap.postponed_op: @@ -250,7 +250,7 @@ def flush(self): self.cached_dict_reads.clear() self.corresponding_array_descrs.clear() - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.emit_postponed_op() def emit_postponed_op(self): @@ -326,7 +326,7 @@ return if op.is_guard(): self.optimizer.pendingfields = ( - self.force_lazy_setfields_and_arrayitems_for_guard()) + self.force_lazy_sets_for_guard()) return opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or # handled specially @@ -354,7 +354,7 @@ if not effectinfo.has_random_effects(): self.force_from_effectinfo(effectinfo) return - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.clean_caches() def optimize_CALL_I(self, op): @@ -432,7 +432,7 @@ # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) + self.force_lazy_set(fielddescr) for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: @@ -442,7 +442,7 @@ del self.cached_dict_reads[fielddescr] except KeyError: pass - self.force_lazy_setfield(fielddescr, can_cache=False) + self.force_lazy_set(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) if arraydescr in self.corresponding_array_descrs: @@ -453,16 +453,16 @@ pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) + self.force_lazy_set(vrefinfo.descr_forced) # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. - def force_lazy_setfield(self, descr, can_cache=True): + def force_lazy_set(self, descr, can_cache=True): try: cf = self.cached_fields[descr] except KeyError: return - cf.force_lazy_setfield(self, descr, can_cache) + cf.force_lazy_set(self, descr, can_cache) def force_lazy_setarrayitem(self, arraydescr, indexb=None, can_cache=True): try: @@ -471,35 +471,35 @@ return for idx, cf in submap.iteritems(): if indexb is None or indexb.contains(idx): - cf.force_lazy_setfield(self, None, can_cache) + cf.force_lazy_set(self, None, can_cache) - def force_all_lazy_setfields_and_arrayitems(self): + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for submap in self.cached_arrayitems.itervalues(): for index, cf in submap.iteritems(): - cf.force_lazy_setfield(self, None) + cf.force_lazy_set(self, None) - def force_lazy_setfields_and_arrayitems_for_guard(self): + def force_lazy_sets_for_guard(self): pendingfields = [] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue val = op.getarg(1) if self.optimizer.is_virtual(val): pendingfields.append(op) continue - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for descr, submap in self.cached_arrayitems.iteritems(): for index, cf in submap.iteritems(): - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue # the only really interesting case that we need to handle in the @@ -511,7 +511,7 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -528,6 +528,7 @@ if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): + assert not self.is_virtual() self._items = self._items + [None] * (index - len(self._items) + 1) self._items[index] = op if cf is not None: diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -36,7 +36,7 @@ return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) def analyze_external_call(self, funcobj, seen=None): - if getattr(funcobj, 'random_effects_on_gcobjs', False): + if funcobj.random_effects_on_gcobjs: return True return graphanalyze.BoolGraphAnalyzer.analyze_external_call( self, funcobj, seen) diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py --- a/rpython/rlib/rstacklet.py +++ b/rpython/rlib/rstacklet.py @@ -1,7 +1,7 @@ import sys from rpython.rlib import _rffi_stacklet as _c from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import fetch_translated_config from rpython.rtyper.lltypesystem import lltype, llmemory DEBUG = False @@ -10,8 +10,8 @@ class StackletThread(object): @jit.dont_look_inside - def __init__(self, config): - self._gcrootfinder = _getgcrootfinder(config, we_are_translated()) + def __init__(self, _argument_ignored_for_backward_compatibility=None): + self._gcrootfinder = _getgcrootfinder(fetch_translated_config()) self._thrd = _c.newthread() if not self._thrd: raise MemoryError @@ -67,11 +67,8 @@ # ____________________________________________________________ -def _getgcrootfinder(config, translated): - if translated: - assert config is not None, ("you have to pass a valid config, " - "e.g. from 'driver.config'") - elif '__pypy__' in sys.builtin_module_names: +def _getgcrootfinder(config): + if config is None and '__pypy__' in sys.builtin_module_names: import py py.test.skip("cannot run the stacklet tests on top of pypy: " "calling directly the C function stacklet_switch() " diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py --- a/rpython/rlib/test/test_rstacklet.py +++ b/rpython/rlib/test/test_rstacklet.py @@ -17,10 +17,9 @@ class Runner: STATUSMAX = 5000 - config = None def init(self, seed): - self.sthread = rstacklet.StackletThread(self.config) + self.sthread = rstacklet.StackletThread() self.random = rrandom.Random(seed) def done(self): @@ -301,12 +300,11 @@ config.translation.gcrootfinder = cls.gcrootfinder GCROOTFINDER = cls.gcrootfinder cls.config = config - cls.old_values = Runner.config, Runner.STATUSMAX - Runner.config = config + cls.old_status_max = Runner.STATUSMAX Runner.STATUSMAX = 25000 def teardown_class(cls): - Runner.config, Runner.STATUSMAX = cls.old_values + Runner.STATUSMAX = cls.old_status_max def test_demo1(self): t, cbuilder = self.compile(entry_point) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -22,7 +22,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, - Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, + Ptr, ContainerType, FuncType, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive, getfunctionptr) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError from rpython.rtyper import rclass @@ -876,18 +876,6 @@ return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) - def genexternalcall(self, fnname, args_v, resulttype=None, **flags): - if isinstance(resulttype, Repr): - resulttype = resulttype.lowleveltype - argtypes = [v.concretetype for v in args_v] - FUNCTYPE = FuncType(argtypes, resulttype or Void) - f = functionptr(FUNCTYPE, fnname, **flags) - cf = inputconst(typeOf(f), f) - return self.genop('direct_call', [cf]+list(args_v), resulttype) - - def gencapicall(self, cfnname, args_v, resulttype=None, **flags): - return self.genexternalcall(cfnname, args_v, resulttype=resulttype, external="CPython", **flags) - def genconst(self, ll_value): return inputconst(typeOf(ll_value), ll_value) diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,5 +1,4 @@ from rpython.rtyper.lltypesystem.lltype import DelayedPointer -from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -90,8 +89,10 @@ if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x - graph = funcobj.graph - assert graph is not None + try: + graph = funcobj.graph + except AttributeError: + return self.top_result() x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -24,22 +24,13 @@ if not isinstance(f, lltype._ptr): return None try: - funcobj = f._getobj() + funcobj = f._obj except lltype.DelayedPointer: return None try: - callable = funcobj._callable - except (AttributeError, KeyError, AssertionError): - return None - try: return funcobj.graph except AttributeError: return None - try: - callable = funcobj._callable - return translator._graphof(callable) - except (AttributeError, KeyError, AssertionError): - return None def replace_exitswitch_by_constant(block, const): From pypy.commits at gmail.com Fri Jan 15 02:33:25 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 14 Jan 2016 23:33:25 -0800 (PST) Subject: [pypy-commit] pypy typed-cells: merge default Message-ID: <5698a0c5.02371c0a.d1779.ffff9ae5@mx.google.com> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r81776:07be65d4b6c9 Date: 2016-01-15 08:32 +0100 http://bitbucket.org/pypy/pypy/changeset/07be65d4b6c9/ Log: merge default diff too long, truncating to 2000 out of 130117 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +.cache/ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -15,3 +15,6 @@ e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0 +f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 +850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 +5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at @@ -56,14 +56,15 @@ Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen + Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen Lukas Diekmann Sven Hager Anders Lehmann + Remi Meier Aurelien Campeas - Remi Meier Niklaus Haldimann Camillo Bruni Laura Creighton @@ -87,7 +88,6 @@ Ludovic Aubry Jacob Hallen Jason Creighton - Richard Plangger Alex Martelli Michal Bendowski stian @@ -168,7 +168,6 @@ Michael Twomey Lucian Branescu Mihaila Yichao Yu - Anton Gulenko Gabriel Lavoie Olivier Dormond Jared Grubb @@ -201,9 +200,12 @@ Alex Perry Vincent Legoll Alan McIntyre + Spenser Bauman Alexander Sedov Attila Gobi Christopher Pope + Devin Jeanpierre + Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg @@ -215,6 +217,7 @@ Carl Meyer Karl Ramm Pieter Zieschang + Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -234,6 +237,7 @@ Lutz Paelike Lucio Torre Lars Wassermann + Philipp Rustemeuer Henrik Vendelbo Dan Buch Miguel de Val Borro @@ -244,14 +248,17 @@ Martin Blais Lene Wagner Tomo Cocoa + Kim Jin Su Toni Mattis Lucas Stadler Julian Berman + Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez William Leslie Bobby Impollonia + Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara @@ -282,6 +289,7 @@ shoma hosaka Daniel Neuhäuser Ben Mather + Niclas Olofsson halgari Boglarka Vezer Chris Pressey @@ -308,13 +316,16 @@ Stefan Marr jiaaro Mads Kiilerich + Richard Lancaster opassembler.py Antony Lee + Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer Even Wiik Thomassen jbs squeaky + Zearin soareschen Kurt Griffiths Mike Bayer @@ -326,6 +337,7 @@ Anna Ravencroft Andrey Churin Dan Crosta + Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz @@ -352,8 +364,7 @@ Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files in the 'lib-python/2.7' directory are all copyrighted by the Python Software Foundation and licensed -under the Python Software License of which you can find a copy here: -http://www.python.org/doc/Copyright.html +under the terms that you can find here: https://docs.python.org/2/license.html License for 'pypy/module/unicodedata/' ====================================== @@ -430,9 +441,9 @@ gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed under the terms of the GPL license as well. -License for 'pypy/module/_vmprof/src' +License for 'rpython/rlib/rvmprof/src' -------------------------------------- The code is based on gperftools. You may see a copy of the License for it at - https://code.google.com/p/gperftools/source/browse/COPYING + https://github.com/gperftools/gperftools/blob/master/COPYING diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ -all: pypy-c +all: pypy-c cffi_imports PYPY_EXECUTABLE := $(shell which pypy) URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") @@ -10,6 +10,8 @@ RUNINTERP = $(PYPY_EXECUTABLE) endif +.PHONY: cffi_imports + pypy-c: @echo @echo "====================================================================" @@ -36,3 +38,6 @@ # replaced with an opaque --jobserver option by the time this Makefile # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html + +cffi_imports: pypy-c + PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -308,7 +308,10 @@ if (len(data) != 8 or data[:4] != imp.get_magic() or struct.unpack(" (k, v), return and remove a (key, value) pair. @@ -116,6 +116,178 @@ return ItemsView(self) +def _compat_with_unordered_dicts(): + # This returns the methods needed in OrderedDict in case the base + # 'dict' class is not actually ordered, like on top of CPython or + # old PyPy or PyPy-STM. + + # ===== Original comments and code follows ===== + # ===== The unmodified methods are not repeated ===== + + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + return dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, _ = self.__map.pop(key) + link_prev[1] = link_next # update link_prev[NEXT] + link_next[0] = link_prev # update link_next[PREV] + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root[1] # start at the first node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[1] # move to next node + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root[0] # start at the last node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[0] # move to previous node + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + dict.clear(self) + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) pairs in od' + for k in self: + yield (k, self[k]) + + update = MutableMapping.update + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + key = next(reversed(self) if last else iter(self)) + value = self.pop(key) + return key, value + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + return locals() + +if _reversed_dict is None: + for _key, _value in _compat_with_unordered_dicts().items(): + setattr(OrderedDict, _key, _value) + del _key, _value + ################################################################################ ### namedtuple ################################################################################ diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -8,13 +8,13 @@ def __init__(self): self._builder = StringBuilder() def append(self, string): - try: - self._builder.append(string) - except UnicodeEncodeError: + if (isinstance(string, unicode) and + type(self._builder) is StringBuilder): ub = UnicodeBuilder() ub.append(self._builder.build()) self._builder = ub - ub.append(string) + self.append = ub.append # shortcut only + self._builder.append(string) def build(self): return self._builder.build() diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -604,21 +604,8 @@ def uuid4(): """Generate a random UUID.""" - - # When the system provides a version-4 UUID generator, use it. - if _uuid_generate_random: - _buffer = ctypes.create_string_buffer(16) - _uuid_generate_random(_buffer) - return UUID(bytes=_buffer.raw) - - # Otherwise, get randomness from urandom or the 'random' module. - try: - import os - return UUID(bytes=os.urandom(16), version=4) - except: - import random - bytes = [chr(random.randrange(256)) for i in range(16)] - return UUID(bytes=bytes, version=4) + import os + return UUID(bytes=os.urandom(16), version=4) def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), @@ -303,7 +303,7 @@ RegrTest('test_memoryio.py'), RegrTest('test_memoryview.py'), RegrTest('test_md5.py'), - RegrTest('test_mhlib.py'), + RegrTest('test_mhlib.py', usemodules='binascii struct'), RegrTest('test_mimetools.py'), RegrTest('test_mimetypes.py'), RegrTest('test_MimeWriter.py', core=False, usemodules='binascii'), diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -1026,16 +1026,22 @@ def tigetflag(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = capname.encode('ascii') return lib.tigetflag(capname) def tigetnum(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = capname.encode('ascii') return lib.tigetnum(capname) def tigetstr(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = capname.encode('ascii') val = lib.tigetstr(capname) if int(ffi.cast("intptr_t", val)) in (0, -1): return None diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py --- a/lib_pypy/_tkinter/tklib_build.py +++ b/lib_pypy/_tkinter/tklib_build.py @@ -212,8 +212,8 @@ #include #endif -char *get_tk_version() { return TK_VERSION; } -char *get_tcl_version() { return TCL_VERSION; } +char *get_tk_version(void) { return TK_VERSION; } +char *get_tcl_version(void) { return TCL_VERSION; } """ % globals(), include_dirs=incdirs, libraries=linklibs, diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': @@ -559,6 +563,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +597,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.2.0 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.2.0" -__version_info__ = (1, 2, 0) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -46,7 +46,7 @@ # endif #else # include -# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) # include # endif #endif @@ -146,7 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) -#define _CFFI_NUM_EXPORTS 25 +#define _cffi_call_python \ + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) +#define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -201,8 +203,11 @@ the others follow */ } +/********** end CPython-specific section **********/ +#else +_CFFI_UNUSED_FN +static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); #endif -/********** end CPython-specific section **********/ #define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) @@ -214,6 +219,12 @@ (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ _CFFI__UNKNOWN_PRIM) +#define _cffi_prim_float(size) \ + ((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \ + (size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \ + (size) == sizeof(long double) ? _CFFI__UNKNOWN_LONG_DOUBLE : \ + _CFFI__UNKNOWN_FLOAT_PRIM) + #define _cffi_check_int(got, got_nonpos, expected) \ ((got_nonpos) == (expected <= 0) && \ (got) == (unsigned long long)expected) diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py --- a/lib_pypy/cffi/_pycparser/__init__.py +++ b/lib_pypy/cffi/_pycparser/__init__.py @@ -4,11 +4,11 @@ # This package file exports some convenience functions for # interacting with pycparser # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2015, Eli Bendersky # License: BSD #----------------------------------------------------------------- __all__ = ['c_lexer', 'c_parser', 'c_ast'] -__version__ = '2.10' +__version__ = '2.14' from subprocess import Popen, PIPE from .c_parser import CParser @@ -91,4 +91,3 @@ if parser is None: parser = CParser() return parser.parse(text, filename) - diff --git a/lib_pypy/cffi/_pycparser/_ast_gen.py b/lib_pypy/cffi/_pycparser/_ast_gen.py --- a/lib_pypy/cffi/_pycparser/_ast_gen.py +++ b/lib_pypy/cffi/_pycparser/_ast_gen.py @@ -1,13 +1,13 @@ #----------------------------------------------------------------- # _ast_gen.py # -# Generates the AST Node classes from a specification given in -# a .yaml file +# Generates the AST Node classes from a specification given in +# a configuration file # # The design of this module was inspired by astgen.py from the # Python 2.5 code-base. # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2015, Eli Bendersky # License: BSD #----------------------------------------------------------------- import pprint @@ -20,7 +20,7 @@ file. """ self.cfg_filename = cfg_filename - self.node_cfg = [NodeCfg(name, contents) + self.node_cfg = [NodeCfg(name, contents) for (name, contents) in self.parse_cfgfile(cfg_filename)] def generate(self, file=None): @@ -28,11 +28,11 @@ """ src = Template(_PROLOGUE_COMMENT).substitute( cfg_filename=self.cfg_filename) - + src += _PROLOGUE_CODE for node_cfg in self.node_cfg: src += node_cfg.generate_source() + '\n\n' - + file.write(src) def parse_cfgfile(self, filename): @@ -57,10 +57,10 @@ class NodeCfg(object): - """ Node configuration. + """ Node configuration. name: node name - contents: a list of contents - attributes and child nodes + contents: a list of contents - attributes and child nodes See comment at the top of the configuration file for details. """ def __init__(self, name, contents): @@ -73,7 +73,7 @@ for entry in contents: clean_entry = entry.rstrip('*') self.all_entries.append(clean_entry) - + if entry.endswith('**'): self.seq_child.append(clean_entry) elif entry.endswith('*'): @@ -86,26 +86,30 @@ src += '\n' + self._gen_children() src += '\n' + self._gen_attr_names() return src - + def _gen_init(self): src = "class %s(Node):\n" % self.name if self.all_entries: args = ', '.join(self.all_entries) + slots = ', '.join("'{0}'".format(e) for e in self.all_entries) + slots += ", 'coord', '__weakref__'" arglist = '(self, %s, coord=None)' % args else: + slots = "'coord', '__weakref__'" arglist = '(self, coord=None)' - + + src += " __slots__ = (%s)\n" % slots src += " def __init__%s:\n" % arglist - + for name in self.all_entries + ['coord']: src += " self.%s = %s\n" % (name, name) - + return src def _gen_children(self): src = ' def children(self):\n' - + if self.all_entries: src += ' nodelist = []\n' @@ -114,21 +118,21 @@ ' if self.%(child)s is not None:' + ' nodelist.append(("%(child)s", self.%(child)s))\n') % ( dict(child=child)) - + for seq_child in self.seq_child: src += ( ' for i, child in enumerate(self.%(child)s or []):\n' ' nodelist.append(("%(child)s[%%d]" %% i, child))\n') % ( dict(child=seq_child)) - + src += ' return tuple(nodelist)\n' else: src += ' return ()\n' - - return src + + return src def _gen_attr_names(self): - src = " attr_names = (" + ''.join("%r," % nm for nm in self.attr) + ')' + src = " attr_names = (" + ''.join("%r, " % nm for nm in self.attr) + ')' return src @@ -136,7 +140,7 @@ r'''#----------------------------------------------------------------- # ** ATTENTION ** # This code was automatically generated from the file: -# $cfg_filename +# $cfg_filename # # Do not modify it directly. Modify the configuration file and # run the generator again. @@ -146,7 +150,7 @@ # # AST Node classes. # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2015, Eli Bendersky # License: BSD #----------------------------------------------------------------- @@ -157,6 +161,7 @@ class Node(object): + __slots__ = () """ Abstract base class for AST nodes. """ def children(self): @@ -167,21 +172,21 @@ def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None): """ Pretty print the Node and all its attributes and children (recursively) to a buffer. - - buf: + + buf: Open IO buffer into which the Node is printed. - - offset: - Initial offset (amount of leading spaces) - + + offset: + Initial offset (amount of leading spaces) + attrnames: True if you want to see the attribute names in name=value pairs. False to only see the values. - + nodenames: - True if you want to see the actual node names + True if you want to see the actual node names within their parents. - + showcoord: Do you want the coordinates of each Node to be displayed. @@ -216,47 +221,47 @@ class NodeVisitor(object): - """ A base NodeVisitor class for visiting c_ast nodes. + """ A base NodeVisitor class for visiting c_ast nodes. Subclass it and define your own visit_XXX methods, where - XXX is the class name you want to visit with these + XXX is the class name you want to visit with these methods. - + For example: - + class ConstantVisitor(NodeVisitor): def __init__(self): self.values = [] - + def visit_Constant(self, node): self.values.append(node.value) - Creates a list of values of all the constant nodes + Creates a list of values of all the constant nodes encountered below the given node. To use it: - + cv = ConstantVisitor() cv.visit(node) - + Notes: - - * generic_visit() will be called for AST nodes for which - no visit_XXX method was defined. - * The children of nodes for which a visit_XXX was + + * generic_visit() will be called for AST nodes for which + no visit_XXX method was defined. + * The children of nodes for which a visit_XXX was defined will not be visited - if you need this, call - generic_visit() on the node. + generic_visit() on the node. You can use: NodeVisitor.generic_visit(self, node) * Modeled after Python's own AST visiting facilities (the ast module of Python 3.0) """ def visit(self, node): - """ Visit a node. + """ Visit a node. """ method = 'visit_' + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) return visitor(node) - + def generic_visit(self, node): - """ Called if no explicit visitor function exists for a + """ Called if no explicit visitor function exists for a node. Implements preorder visiting of the node. """ for c_name, c in node.children(): diff --git a/lib_pypy/cffi/_pycparser/_build_tables.py b/lib_pypy/cffi/_pycparser/_build_tables.py --- a/lib_pypy/cffi/_pycparser/_build_tables.py +++ b/lib_pypy/cffi/_pycparser/_build_tables.py @@ -6,12 +6,11 @@ # Also generates AST code from the configuration file. # Should be called from the pycparser directory. # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2015, Eli Bendersky # License: BSD #----------------------------------------------------------------- # Generate c_ast.py -# from _ast_gen import ASTCodeGenerator ast_gen = ASTCodeGenerator('_c_ast.cfg') ast_gen.generate(open('c_ast.py', 'w')) diff --git a/lib_pypy/cffi/_pycparser/_c_ast.cfg b/lib_pypy/cffi/_pycparser/_c_ast.cfg --- a/lib_pypy/cffi/_pycparser/_c_ast.cfg +++ b/lib_pypy/cffi/_pycparser/_c_ast.cfg @@ -1,188 +1,189 @@ -#----------------------------------------------------------------- -# pycparser: _c_ast_gen.cfg -# -# Defines the AST Node classes used in pycparser. -# -# Each entry is a Node sub-class name, listing the attributes -# and child nodes of the class: -# * - a child node -# ** - a sequence of child nodes -# - an attribute -# -# Copyright (C) 2008-2012, Eli Bendersky -# License: BSD -#----------------------------------------------------------------- - -ArrayDecl: [type*, dim*] - -ArrayRef: [name*, subscript*] - -# op: =, +=, /= etc. -# -Assignment: [op, lvalue*, rvalue*] - -BinaryOp: [op, left*, right*] - -Break: [] - -Case: [expr*, stmts**] - -Cast: [to_type*, expr*] - -# Compound statement in C99 is a list of block items (declarations or -# statements). -# -Compound: [block_items**] - -# Compound literal (anonymous aggregate) for C99. -# (type-name) {initializer_list} -# type: the typename -# init: InitList for the initializer list -# -CompoundLiteral: [type*, init*] - -# type: int, char, float, etc. see CLexer for constant token types -# -Constant: [type, value] - -Continue: [] - -# name: the variable being declared -# quals: list of qualifiers (const, volatile) -# funcspec: list function specifiers (i.e. inline in C99) -# storage: list of storage specifiers (extern, register, etc.) -# type: declaration type (probably nested with all the modifiers) -# init: initialization value, or None -# bitsize: bit field size, or None -# -Decl: [name, quals, storage, funcspec, type*, init*, bitsize*] - -DeclList: [decls**] - -Default: [stmts**] - -DoWhile: [cond*, stmt*] - -# Represents the ellipsis (...) parameter in a function -# declaration -# -EllipsisParam: [] - -# An empty statement (a semicolon ';' on its own) -# -EmptyStatement: [] - -# Enumeration type specifier -# name: an optional ID -# values: an EnumeratorList -# -Enum: [name, values*] - -# A name/value pair for enumeration values -# -Enumerator: [name, value*] - -# A list of enumerators -# -EnumeratorList: [enumerators**] - -# A list of expressions separated by the comma operator. -# -ExprList: [exprs**] - -# This is the top of the AST, representing a single C file (a -# translation unit in K&R jargon). It contains a list of -# "external-declaration"s, which is either declarations (Decl), -# Typedef or function definitions (FuncDef). -# -FileAST: [ext**] - -# for (init; cond; next) stmt -# -For: [init*, cond*, next*, stmt*] - -# name: Id -# args: ExprList -# -FuncCall: [name*, args*] - -# type (args) -# -FuncDecl: [args*, type*] - -# Function definition: a declarator for the function name and -# a body, which is a compound statement. -# There's an optional list of parameter declarations for old -# K&R-style definitions -# -FuncDef: [decl*, param_decls**, body*] - -Goto: [name] - -ID: [name] - -# Holder for types that are a simple identifier (e.g. the built -# ins void, char etc. and typedef-defined types) -# -IdentifierType: [names] - -If: [cond*, iftrue*, iffalse*] - -# An initialization list used for compound literals. -# -InitList: [exprs**] - -Label: [name, stmt*] - -# A named initializer for C99. -# The name of a NamedInitializer is a sequence of Nodes, because -# names can be hierarchical and contain constant expressions. -# -NamedInitializer: [name**, expr*] - -# a list of comma separated function parameter declarations -# -ParamList: [params**] - -PtrDecl: [quals, type*] - -Return: [expr*] - -# name: struct tag name -# decls: declaration of members -# -Struct: [name, decls**] - -# type: . or -> -# name.field or name->field -# -StructRef: [name*, type, field*] - -Switch: [cond*, stmt*] - -# cond ? iftrue : iffalse -# -TernaryOp: [cond*, iftrue*, iffalse*] - -# A base type declaration -# -TypeDecl: [declname, quals, type*] - -# A typedef declaration. -# Very similar to Decl, but without some attributes -# -Typedef: [name, quals, storage, type*] - -Typename: [quals, type*] - -UnaryOp: [op, expr*] - -# name: union tag name -# decls: declaration of members -# -Union: [name, decls**] - -While: [cond*, stmt*] - - - +#----------------------------------------------------------------- +# pycparser: _c_ast.cfg +# +# Defines the AST Node classes used in pycparser. +# +# Each entry is a Node sub-class name, listing the attributes +# and child nodes of the class: +# * - a child node +# ** - a sequence of child nodes +# - an attribute +# +# Copyright (C) 2008-2015, Eli Bendersky +# License: BSD +#----------------------------------------------------------------- + +# ArrayDecl is a nested declaration of an array with the given type. +# dim: the dimension (for example, constant 42) +# dim_quals: list of dimension qualifiers, to support C99's allowing 'const' +# and 'static' within the array dimension in function declarations. +ArrayDecl: [type*, dim*, dim_quals] + +ArrayRef: [name*, subscript*] + +# op: =, +=, /= etc. +# +Assignment: [op, lvalue*, rvalue*] + +BinaryOp: [op, left*, right*] + +Break: [] + +Case: [expr*, stmts**] + +Cast: [to_type*, expr*] + +# Compound statement in C99 is a list of block items (declarations or +# statements). +# +Compound: [block_items**] + +# Compound literal (anonymous aggregate) for C99. +# (type-name) {initializer_list} +# type: the typename +# init: InitList for the initializer list +# +CompoundLiteral: [type*, init*] + +# type: int, char, float, etc. see CLexer for constant token types +# +Constant: [type, value] + +Continue: [] + +# name: the variable being declared +# quals: list of qualifiers (const, volatile) +# funcspec: list function specifiers (i.e. inline in C99) +# storage: list of storage specifiers (extern, register, etc.) +# type: declaration type (probably nested with all the modifiers) +# init: initialization value, or None +# bitsize: bit field size, or None +# +Decl: [name, quals, storage, funcspec, type*, init*, bitsize*] + +DeclList: [decls**] + +Default: [stmts**] + +DoWhile: [cond*, stmt*] + +# Represents the ellipsis (...) parameter in a function +# declaration +# +EllipsisParam: [] + +# An empty statement (a semicolon ';' on its own) +# +EmptyStatement: [] + +# Enumeration type specifier +# name: an optional ID +# values: an EnumeratorList +# +Enum: [name, values*] + +# A name/value pair for enumeration values +# +Enumerator: [name, value*] + +# A list of enumerators +# +EnumeratorList: [enumerators**] + +# A list of expressions separated by the comma operator. +# +ExprList: [exprs**] + +# This is the top of the AST, representing a single C file (a +# translation unit in K&R jargon). It contains a list of +# "external-declaration"s, which is either declarations (Decl), +# Typedef or function definitions (FuncDef). +# +FileAST: [ext**] + +# for (init; cond; next) stmt +# +For: [init*, cond*, next*, stmt*] + +# name: Id +# args: ExprList +# +FuncCall: [name*, args*] + +# type (args) +# +FuncDecl: [args*, type*] + +# Function definition: a declarator for the function name and +# a body, which is a compound statement. +# There's an optional list of parameter declarations for old +# K&R-style definitions +# +FuncDef: [decl*, param_decls**, body*] + +Goto: [name] + +ID: [name] + +# Holder for types that are a simple identifier (e.g. the built +# ins void, char etc. and typedef-defined types) +# +IdentifierType: [names] + +If: [cond*, iftrue*, iffalse*] + +# An initialization list used for compound literals. +# +InitList: [exprs**] + +Label: [name, stmt*] + +# A named initializer for C99. +# The name of a NamedInitializer is a sequence of Nodes, because +# names can be hierarchical and contain constant expressions. +# +NamedInitializer: [name**, expr*] + +# a list of comma separated function parameter declarations +# +ParamList: [params**] + +PtrDecl: [quals, type*] + +Return: [expr*] + +# name: struct tag name +# decls: declaration of members +# +Struct: [name, decls**] + +# type: . or -> +# name.field or name->field +# +StructRef: [name*, type, field*] + +Switch: [cond*, stmt*] + +# cond ? iftrue : iffalse +# +TernaryOp: [cond*, iftrue*, iffalse*] + +# A base type declaration +# +TypeDecl: [declname, quals, type*] + +# A typedef declaration. +# Very similar to Decl, but without some attributes +# +Typedef: [name, quals, storage, type*] + +Typename: [name, quals, type*] + +UnaryOp: [op, expr*] + +# name: union tag name +# decls: declaration of members +# +Union: [name, decls**] + +While: [cond*, stmt*] diff --git a/lib_pypy/cffi/_pycparser/ast_transforms.py b/lib_pypy/cffi/_pycparser/ast_transforms.py --- a/lib_pypy/cffi/_pycparser/ast_transforms.py +++ b/lib_pypy/cffi/_pycparser/ast_transforms.py @@ -3,7 +3,7 @@ # # Some utilities used by the parser to create a friendlier AST. # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2015, Eli Bendersky # License: BSD #------------------------------------------------------------------------------ @@ -84,7 +84,7 @@ _extract_nested_case(child, new_compound.block_items) last_case = new_compound.block_items[-1] else: - # Other statements are added as childrent to the last case, if it + # Other statements are added as children to the last case, if it # exists. if last_case is None: new_compound.block_items.append(child) diff --git a/lib_pypy/cffi/_pycparser/c_ast.py b/lib_pypy/cffi/_pycparser/c_ast.py --- a/lib_pypy/cffi/_pycparser/c_ast.py +++ b/lib_pypy/cffi/_pycparser/c_ast.py @@ -1,7 +1,7 @@ #----------------------------------------------------------------- # ** ATTENTION ** # This code was automatically generated from the file: -# _c_ast.cfg +# _c_ast.cfg # # Do not modify it directly. Modify the configuration file and # run the generator again. @@ -11,7 +11,7 @@ # # AST Node classes. # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2015, Eli Bendersky # License: BSD #----------------------------------------------------------------- @@ -20,6 +20,7 @@ class Node(object): + __slots__ = () """ Abstract base class for AST nodes. """ def children(self): @@ -30,21 +31,21 @@ def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None): """ Pretty print the Node and all its attributes and children (recursively) to a buffer. - - buf: + + buf: Open IO buffer into which the Node is printed. - - offset: - Initial offset (amount of leading spaces) - + + offset: + Initial offset (amount of leading spaces) + attrnames: True if you want to see the attribute names in name=value pairs. False to only see the values. - + nodenames: - True if you want to see the actual node names + True if you want to see the actual node names within their parents. - + showcoord: Do you want the coordinates of each Node to be displayed. @@ -79,47 +80,47 @@ class NodeVisitor(object): - """ A base NodeVisitor class for visiting c_ast nodes. + """ A base NodeVisitor class for visiting c_ast nodes. Subclass it and define your own visit_XXX methods, where - XXX is the class name you want to visit with these + XXX is the class name you want to visit with these methods. - + For example: - + class ConstantVisitor(NodeVisitor): def __init__(self): self.values = [] - + def visit_Constant(self, node): self.values.append(node.value) - Creates a list of values of all the constant nodes + Creates a list of values of all the constant nodes encountered below the given node. To use it: - + cv = ConstantVisitor() cv.visit(node) - + Notes: - - * generic_visit() will be called for AST nodes for which - no visit_XXX method was defined. - * The children of nodes for which a visit_XXX was + + * generic_visit() will be called for AST nodes for which + no visit_XXX method was defined. + * The children of nodes for which a visit_XXX was defined will not be visited - if you need this, call - generic_visit() on the node. + generic_visit() on the node. You can use: NodeVisitor.generic_visit(self, node) * Modeled after Python's own AST visiting facilities (the ast module of Python 3.0) """ def visit(self, node): - """ Visit a node. + """ Visit a node. """ method = 'visit_' + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) return visitor(node) - + def generic_visit(self, node): - """ Called if no explicit visitor function exists for a + """ Called if no explicit visitor function exists for a node. Implements preorder visiting of the node. """ for c_name, c in node.children(): @@ -127,9 +128,11 @@ class ArrayDecl(Node): - def __init__(self, type, dim, coord=None): + __slots__ = ('type', 'dim', 'dim_quals', 'coord', '__weakref__') + def __init__(self, type, dim, dim_quals, coord=None): self.type = type self.dim = dim + self.dim_quals = dim_quals self.coord = coord def children(self): @@ -138,9 +141,10 @@ if self.dim is not None: nodelist.append(("dim", self.dim)) return tuple(nodelist) - attr_names = () + attr_names = ('dim_quals', ) class ArrayRef(Node): + __slots__ = ('name', 'subscript', 'coord', '__weakref__') def __init__(self, name, subscript, coord=None): self.name = name self.subscript = subscript @@ -155,6 +159,7 @@ attr_names = () class Assignment(Node): + __slots__ = ('op', 'lvalue', 'rvalue', 'coord', '__weakref__') def __init__(self, op, lvalue, rvalue, coord=None): self.op = op self.lvalue = lvalue @@ -167,9 +172,10 @@ if self.rvalue is not None: nodelist.append(("rvalue", self.rvalue)) return tuple(nodelist) - attr_names = ('op',) + attr_names = ('op', ) class BinaryOp(Node): + __slots__ = ('op', 'left', 'right', 'coord', '__weakref__') def __init__(self, op, left, right, coord=None): self.op = op self.left = left @@ -182,9 +188,10 @@ if self.right is not None: nodelist.append(("right", self.right)) return tuple(nodelist) - attr_names = ('op',) + attr_names = ('op', ) class Break(Node): + __slots__ = ('coord', '__weakref__') def __init__(self, coord=None): self.coord = coord @@ -194,6 +201,7 @@ attr_names = () class Case(Node): + __slots__ = ('expr', 'stmts', 'coord', '__weakref__') def __init__(self, expr, stmts, coord=None): self.expr = expr self.stmts = stmts @@ -209,6 +217,7 @@ attr_names = () class Cast(Node): + __slots__ = ('to_type', 'expr', 'coord', '__weakref__') def __init__(self, to_type, expr, coord=None): self.to_type = to_type self.expr = expr @@ -223,6 +232,7 @@ attr_names = () class Compound(Node): + __slots__ = ('block_items', 'coord', '__weakref__') def __init__(self, block_items, coord=None): self.block_items = block_items self.coord = coord @@ -236,6 +246,7 @@ attr_names = () class CompoundLiteral(Node): + __slots__ = ('type', 'init', 'coord', '__weakref__') def __init__(self, type, init, coord=None): self.type = type self.init = init @@ -250,6 +261,7 @@ attr_names = () class Constant(Node): + __slots__ = ('type', 'value', 'coord', '__weakref__') def __init__(self, type, value, coord=None): self.type = type self.value = value @@ -259,9 +271,10 @@ nodelist = [] return tuple(nodelist) - attr_names = ('type','value',) + attr_names = ('type', 'value', ) class Continue(Node): + __slots__ = ('coord', '__weakref__') def __init__(self, coord=None): self.coord = coord @@ -271,6 +284,7 @@ attr_names = () class Decl(Node): + __slots__ = ('name', 'quals', 'storage', 'funcspec', 'type', 'init', 'bitsize', 'coord', '__weakref__') def __init__(self, name, quals, storage, funcspec, type, init, bitsize, coord=None): self.name = name self.quals = quals @@ -288,9 +302,10 @@ if self.bitsize is not None: nodelist.append(("bitsize", self.bitsize)) return tuple(nodelist) - attr_names = ('name','quals','storage','funcspec',) + attr_names = ('name', 'quals', 'storage', 'funcspec', ) class DeclList(Node): + __slots__ = ('decls', 'coord', '__weakref__') def __init__(self, decls, coord=None): self.decls = decls self.coord = coord @@ -304,6 +319,7 @@ attr_names = () class Default(Node): + __slots__ = ('stmts', 'coord', '__weakref__') def __init__(self, stmts, coord=None): self.stmts = stmts self.coord = coord @@ -317,6 +333,7 @@ attr_names = () class DoWhile(Node): + __slots__ = ('cond', 'stmt', 'coord', '__weakref__') def __init__(self, cond, stmt, coord=None): self.cond = cond self.stmt = stmt @@ -331,6 +348,7 @@ attr_names = () class EllipsisParam(Node): + __slots__ = ('coord', '__weakref__') def __init__(self, coord=None): self.coord = coord @@ -340,6 +358,7 @@ attr_names = () class EmptyStatement(Node): + __slots__ = ('coord', '__weakref__') def __init__(self, coord=None): self.coord = coord @@ -349,6 +368,7 @@ attr_names = () class Enum(Node): + __slots__ = ('name', 'values', 'coord', '__weakref__') def __init__(self, name, values, coord=None): self.name = name self.values = values @@ -359,9 +379,10 @@ if self.values is not None: nodelist.append(("values", self.values)) return tuple(nodelist) - attr_names = ('name',) + attr_names = ('name', ) class Enumerator(Node): + __slots__ = ('name', 'value', 'coord', '__weakref__') def __init__(self, name, value, coord=None): self.name = name self.value = value @@ -372,9 +393,10 @@ if self.value is not None: nodelist.append(("value", self.value)) return tuple(nodelist) - attr_names = ('name',) + attr_names = ('name', ) class EnumeratorList(Node): + __slots__ = ('enumerators', 'coord', '__weakref__') def __init__(self, enumerators, coord=None): self.enumerators = enumerators self.coord = coord @@ -388,6 +410,7 @@ attr_names = () class ExprList(Node): + __slots__ = ('exprs', 'coord', '__weakref__') def __init__(self, exprs, coord=None): self.exprs = exprs self.coord = coord @@ -401,6 +424,7 @@ attr_names = () class FileAST(Node): + __slots__ = ('ext', 'coord', '__weakref__') def __init__(self, ext, coord=None): self.ext = ext self.coord = coord @@ -414,6 +438,7 @@ attr_names = () class For(Node): + __slots__ = ('init', 'cond', 'next', 'stmt', 'coord', '__weakref__') def __init__(self, init, cond, next, stmt, coord=None): self.init = init self.cond = cond @@ -432,6 +457,7 @@ attr_names = () class FuncCall(Node): + __slots__ = ('name', 'args', 'coord', '__weakref__') def __init__(self, name, args, coord=None): self.name = name self.args = args @@ -446,6 +472,7 @@ attr_names = () class FuncDecl(Node): + __slots__ = ('args', 'type', 'coord', '__weakref__') def __init__(self, args, type, coord=None): self.args = args self.type = type @@ -460,6 +487,7 @@ attr_names = () class FuncDef(Node): + __slots__ = ('decl', 'param_decls', 'body', 'coord', '__weakref__') def __init__(self, decl, param_decls, body, coord=None): self.decl = decl self.param_decls = param_decls @@ -477,6 +505,7 @@ attr_names = () class Goto(Node): + __slots__ = ('name', 'coord', '__weakref__') def __init__(self, name, coord=None): self.name = name self.coord = coord @@ -485,9 +514,10 @@ nodelist = [] return tuple(nodelist) - attr_names = ('name',) + attr_names = ('name', ) class ID(Node): + __slots__ = ('name', 'coord', '__weakref__') def __init__(self, name, coord=None): self.name = name self.coord = coord @@ -496,9 +526,10 @@ nodelist = [] return tuple(nodelist) - attr_names = ('name',) + attr_names = ('name', ) class IdentifierType(Node): + __slots__ = ('names', 'coord', '__weakref__') def __init__(self, names, coord=None): self.names = names self.coord = coord @@ -507,9 +538,10 @@ nodelist = [] return tuple(nodelist) - attr_names = ('names',) + attr_names = ('names', ) class If(Node): + __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__') def __init__(self, cond, iftrue, iffalse, coord=None): self.cond = cond self.iftrue = iftrue @@ -526,6 +558,7 @@ attr_names = () class InitList(Node): + __slots__ = ('exprs', 'coord', '__weakref__') def __init__(self, exprs, coord=None): self.exprs = exprs self.coord = coord @@ -539,6 +572,7 @@ attr_names = () class Label(Node): + __slots__ = ('name', 'stmt', 'coord', '__weakref__') def __init__(self, name, stmt, coord=None): self.name = name self.stmt = stmt @@ -549,9 +583,10 @@ if self.stmt is not None: nodelist.append(("stmt", self.stmt)) return tuple(nodelist) - attr_names = ('name',) + attr_names = ('name', ) class NamedInitializer(Node): + __slots__ = ('name', 'expr', 'coord', '__weakref__') def __init__(self, name, expr, coord=None): self.name = name self.expr = expr @@ -567,6 +602,7 @@ attr_names = () class ParamList(Node): + __slots__ = ('params', 'coord', '__weakref__') def __init__(self, params, coord=None): self.params = params self.coord = coord @@ -580,6 +616,7 @@ attr_names = () class PtrDecl(Node): + __slots__ = ('quals', 'type', 'coord', '__weakref__') def __init__(self, quals, type, coord=None): self.quals = quals self.type = type @@ -590,9 +627,10 @@ if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) - attr_names = ('quals',) + attr_names = ('quals', ) class Return(Node): + __slots__ = ('expr', 'coord', '__weakref__') def __init__(self, expr, coord=None): self.expr = expr self.coord = coord @@ -605,6 +643,7 @@ attr_names = () class Struct(Node): + __slots__ = ('name', 'decls', 'coord', '__weakref__') def __init__(self, name, decls, coord=None): self.name = name self.decls = decls @@ -616,9 +655,10 @@ nodelist.append(("decls[%d]" % i, child)) return tuple(nodelist) - attr_names = ('name',) + attr_names = ('name', ) class StructRef(Node): + __slots__ = ('name', 'type', 'field', 'coord', '__weakref__') def __init__(self, name, type, field, coord=None): self.name = name self.type = type @@ -631,9 +671,10 @@ if self.field is not None: nodelist.append(("field", self.field)) return tuple(nodelist) - attr_names = ('type',) + attr_names = ('type', ) class Switch(Node): + __slots__ = ('cond', 'stmt', 'coord', '__weakref__') def __init__(self, cond, stmt, coord=None): self.cond = cond self.stmt = stmt @@ -648,6 +689,7 @@ attr_names = () class TernaryOp(Node): + __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__') def __init__(self, cond, iftrue, iffalse, coord=None): self.cond = cond self.iftrue = iftrue @@ -664,6 +706,7 @@ attr_names = () class TypeDecl(Node): + __slots__ = ('declname', 'quals', 'type', 'coord', '__weakref__') def __init__(self, declname, quals, type, coord=None): self.declname = declname self.quals = quals @@ -675,9 +718,10 @@ if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) - attr_names = ('declname','quals',) + attr_names = ('declname', 'quals', ) class Typedef(Node): + __slots__ = ('name', 'quals', 'storage', 'type', 'coord', '__weakref__') def __init__(self, name, quals, storage, type, coord=None): self.name = name self.quals = quals @@ -690,10 +734,12 @@ if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) - attr_names = ('name','quals','storage',) + attr_names = ('name', 'quals', 'storage', ) class Typename(Node): - def __init__(self, quals, type, coord=None): + __slots__ = ('name', 'quals', 'type', 'coord', '__weakref__') + def __init__(self, name, quals, type, coord=None): + self.name = name self.quals = quals self.type = type self.coord = coord @@ -703,9 +749,10 @@ if self.type is not None: nodelist.append(("type", self.type)) return tuple(nodelist) - attr_names = ('quals',) + attr_names = ('name', 'quals', ) class UnaryOp(Node): + __slots__ = ('op', 'expr', 'coord', '__weakref__') def __init__(self, op, expr, coord=None): self.op = op self.expr = expr @@ -716,9 +763,10 @@ if self.expr is not None: nodelist.append(("expr", self.expr)) return tuple(nodelist) - attr_names = ('op',) + attr_names = ('op', ) class Union(Node): + __slots__ = ('name', 'decls', 'coord', '__weakref__') def __init__(self, name, decls, coord=None): self.name = name self.decls = decls @@ -730,9 +778,10 @@ nodelist.append(("decls[%d]" % i, child)) return tuple(nodelist) - attr_names = ('name',) + attr_names = ('name', ) class While(Node): + __slots__ = ('cond', 'stmt', 'coord', '__weakref__') def __init__(self, cond, stmt, coord=None): self.cond = cond self.stmt = stmt diff --git a/lib_pypy/cffi/_pycparser/c_generator.py b/lib_pypy/cffi/_pycparser/c_generator.py --- a/lib_pypy/cffi/_pycparser/c_generator.py +++ b/lib_pypy/cffi/_pycparser/c_generator.py @@ -3,7 +3,7 @@ # # C code generator from pycparser AST nodes. # -# Copyright (C) 2008-2012, Eli Bendersky +# Copyright (C) 2008-2015, Eli Bendersky # License: BSD #------------------------------------------------------------------------------ from . import c_ast @@ -15,8 +15,6 @@ generic_visit. """ def __init__(self): - self.output = '' - # Statements start with indentation of self.indent_level spaces, using # the _make_indent method # @@ -34,7 +32,7 @@ if node is None: return '' else: - return ''.join(self.visit(c) for c in node.children()) + return ''.join(self.visit(c) for c_name, c in node.children()) From pypy.commits at gmail.com Fri Jan 15 03:14:43 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 00:14:43 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: jumping over pair (end of regalloc) if one of them is forbidden Message-ID: <5698aa73.9a6f1c0a.aa828.7614@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81777:4a9e35058f3e Date: 2016-01-15 09:13 +0100 http://bitbucket.org/pypy/pypy/changeset/4a9e35058f3e/ Log: jumping over pair (end of regalloc) if one of them is forbidden diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -189,7 +189,7 @@ even, odd = None, None REGS = r.registers i = len(self.free_regs)-1 - candidates = [] + candidates = {} while i >= 0: even = self.free_regs[i] if even.is_even(): @@ -198,7 +198,7 @@ if odd not in self.free_regs: # sadly odd is not free, but for spilling # we found a candidate - candidates.append(odd) + candidates[odd] = True i -= 1 continue assert var not in self.reg_bindings @@ -218,7 +218,7 @@ if even in r.MANAGED_REGS and even not in self.free_regs: # yes even might be a candidate # this means that odd is free, but not even - candidates.append(even) + candidates[even] = True i -= 1 if len(candidates) != 0: @@ -275,8 +275,10 @@ reg2 = r.MANAGED_REGS[i+1] assert reg.is_even() and reg2.is_odd() ovar = reverse_mapping[reg] - ovar2 = reverse_mapping[reg2] - if ovar in forbidden_vars or ovar2 in forbidden_vars: + if ovar in forbidden_vars: + continue + ovar2 = reverse_mapping.get(reg2, None) + if ovar2 is not None and ovar2 in forbidden_vars: # blocked, try other register pair continue even = reg @@ -284,7 +286,8 @@ self._sync_var(ovar) self._sync_var(ovar2) del self.reg_bindings[ovar] - del self.reg_bindings[ovar2] + if ovar2 is not None: + del self.reg_bindings[ovar2] # both are not added to free_regs! no need to do so self.reg_bindings[var] = even self.reg_bindings[var2] = odd From pypy.commits at gmail.com Fri Jan 15 04:03:41 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 01:03:41 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: Write a warning if _continuation is imported when we call _vmprof.enable() Message-ID: <5698b5ed.41dfc20a.8feba.ffff97fb@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81778:63917019c08b Date: 2016-01-15 09:51 +0100 http://bitbucket.org/pypy/pypy/changeset/63917019c08b/ Log: Write a warning if _continuation is imported when we call _vmprof.enable() diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -59,6 +59,11 @@ 'interval' is a float representing the sampling interval, in seconds. Must be smaller than 1.0 """ + w_modules = space.sys.get('modules') + if space.contains(w_modules, space.wrap('_continuation')): + space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + "with vmprof will crash"), + space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: From pypy.commits at gmail.com Fri Jan 15 04:03:43 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 01:03:43 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fix some tests Message-ID: <5698b5ef.9a6f1c0a.aa828.ffff86a1@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81779:334e44a24a23 Date: 2016-01-15 10:02 +0100 http://bitbucket.org/pypy/pypy/changeset/334e44a24a23/ Log: fix some tests diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -315,7 +315,7 @@ while addr != start: addr -= sizeofaddr gc._trace_callback(callback, arg, addr) - + gc = gctransformer.gcdata.gc assert not hasattr(gc, 'custom_trace_dispatcher') # ^^^ create_custom_trace_funcs() must not run before this diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -193,7 +193,7 @@ if we_are_translated() and not jit.we_are_jitted(): unique_id = get_code_fn(*args)._vmprof_unique_id ll_args, token = lower(*args) - ll_trampoline = get_ll_trampoline(token, True) + ll_trampoline = get_ll_trampoline(token) ll_result = ll_trampoline(*ll_args + (unique_id,)) else: return func(*args) From pypy.commits at gmail.com Fri Jan 15 04:05:12 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 01:05:12 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: oups Message-ID: <5698b648.4473c20a.c47af.ffff9c37@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81780:6d38653b9837 Date: 2016-01-15 10:04 +0100 http://bitbucket.org/pypy/pypy/changeset/6d38653b9837/ Log: oups diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,7 +60,7 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains(w_modules, space.wrap('_continuation')): + if space.is_true(space.contains(w_modules, space.wrap('_continuation'))): space.warn(space.wrap("Using _continuation/greenlet/stacklet together " "with vmprof will crash"), space.w_RuntimeWarning) From pypy.commits at gmail.com Fri Jan 15 04:30:17 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 01:30:17 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed a debug test case from yesterdays bughunt Message-ID: <5698bc29.022f1c0a.75d82.ffff9136@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81781:58d934d5d888 Date: 2016-01-15 09:30 +0100 http://bitbucket.org/pypy/pypy/changeset/58d934d5d888/ Log: removed a debug test case from yesterdays bughunt diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -28,36 +28,3 @@ # realloc frame takes the most space (from just after larl, to lay) bridge_loop_instructions = "larl; lg; cgfi; je; lghi; stg; " \ "lay; lgfi;( iihf;)? lgfi;( iihf;)? basr; lay; lg; br;$" - - def test_multiple_arguments(self): - from rpython.rtyper.annlowlevel import llhelper - from rpython.jit.metainterp.typesystem import deref - from rpython.rlib.jit_libffi import types - from rpython.jit.codewriter.effectinfo import EffectInfo - from rpython.rlib.rarithmetic import intmask - - def func_int(a, b, c, d, e, f): - sum = intmask(a) + intmask(b) + intmask(c) + intmask(d) + intmask(e) + intmask(f) - return sum - - functions = [ - (func_int, lltype.Signed, types.sint, 655360, 655360), - (func_int, lltype.Signed, types.sint, 655360, -293999429), - ] - - cpu = self.cpu - for func, TP, ffi_type, num, num1 in functions: - # - FPTR = self.Ptr(self.FuncType([TP] * 6, TP)) - func_ptr = llhelper(FPTR, func) - FUNC = deref(FPTR) - funcbox = self.get_funcbox(cpu, func_ptr) - # first, try it with the "normal" calldescr - calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo.MOST_GENERAL) - iargs = [0x7fffFFFFffffFFFF,1,0,0,0,0] - args = [InputArgInt(num) for num in iargs] - res = self.execute_operation(rop.CALL_I, - [funcbox] + args, - 'int', descr=calldescr) - assert res == sum(iargs) From pypy.commits at gmail.com Fri Jan 15 04:30:19 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 01:30:19 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: libffi issue handled just after calling. narrower integer types trigger zero/sign extension Message-ID: <5698bc2b.41dfc20a.8feba.ffffa15a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81782:ffbab923d30d Date: 2016-01-15 10:29 +0100 http://bitbucket.org/pypy/pypy/changeset/ffbab923d30d/ Log: libffi issue handled just after calling. narrower integer types trigger zero/sign extension diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -42,6 +42,7 @@ self.current_clt = None self._regalloc = None self.datablockwrapper = None + self.subject_op = None # needed in call assembler to pass by the operation self.propagate_exception_path = 0 self.stack_check_slowpath = 0 self.loop_run_counters = [] diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -10,6 +10,7 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.jit.backend.llsupport import llerrno from rpython.rtyper.lltypesystem import rffi +from rpython.jit.backend.llsupport.descr import CallDescr class CallBuilder(AbstractCallBuilder): GPR_ARGS = [r.r2, r.r3, r.r4, r.r5, r.r6] @@ -19,9 +20,18 @@ RSHADOWPTR = r.r9 RFASTGILPTR = r.r10 - def __init__(self, assembler, fnloc, arglocs, resloc): + def __init__(self, assembler, fnloc, arglocs, resloc, calldescr): + type = INT + size = None + self.ressign = True + if calldescr is not None: + assert isinstance(calldescr, CallDescr) + type = calldescr.get_result_type() + size = calldescr.get_result_size() + self.ressign = calldescr.is_result_signed() + AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs, - resloc, restype=INT, ressize=None) + resloc, restype=type, ressize=size) def prepare_arguments(self): self.subtracted_to_sp = 0 @@ -144,6 +154,30 @@ # move the frame pointer self.mc.LAY(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) self.mc.raw_call() + # + self.ensure_correct_signzero_extension() + + def ensure_correct_signzero_extension(self): + if self.restype == 'i' and self.ressize != WORD: + # we must be sure! libffi (s390x impl) will not return + # a sane 64 bit zero/sign extended value. fix for this + # has been rejected (Jan. 2016). This will not be updated + # any time soon... + if self.ressign: + # sign extend! + if self.ressize == 1: self.mc.LGBR(r.r2, r.r2) + elif self.ressize == 2: self.mc.LGHR(r.r2, r.r2) + elif self.ressize == 4: self.mc.LGFR(r.r2, r.r2) + else: + assert 0, "cannot sign extend size %d" % self.ressize + else: + # zero extend! + if self.ressize == 1: self.mc.LLGCR(r.r2, r.r2) + elif self.ressize == 2: self.mc.LLGHR(r.r2, r.r2) + elif self.ressize == 4: self.mc.LLGFR(r.r2, r.r2) + else: + assert 0, "cannot zero extend size %d" % self.ressize + def restore_stack_pointer(self): # it must at LEAST be 160 bytes diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -155,10 +155,14 @@ 'LOCGR': ('rrf_c', ['\xB9','\xE2']), 'LOCG': ('rsy_b', ['\xEB','\xE2']), - # load for sign ext + # load with sign ext 'LGBR': ('rre', ['\xB9','\x06']), 'LGHR': ('rre', ['\xB9','\x07']), 'LGFR': ('rre', ['\xB9','\x14']), + # load with zero ext + 'LLGCR': ('rre', ['\xB9','\x84']), + 'LLGHR': ('rre', ['\xB9','\x85']), + 'LLGFR': ('rre', ['\xB9','\x16']), # store memory 'STMG': ('rsy_a', ['\xEB','\x24']), diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -279,7 +279,7 @@ adr = arglocs[func_index] arglist = arglocs[func_index+1:] - cb = callbuilder.CallBuilder(self, adr, arglist, resloc) + cb = callbuilder.CallBuilder(self, adr, arglist, resloc, op.getdescr()) descr = op.getdescr() assert isinstance(descr, CallDescr) @@ -1076,7 +1076,9 @@ vloc = imm(0) self._store_force_index(self._find_nearby_operation(regalloc, +1)) # 'result_loc' is either r2, f0 or None + self.subject_op = op self.call_assembler(op, argloc, vloc, result_loc, r.r2) + self.subject_op = None self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) emit_call_assembler_i = _genop_call_assembler @@ -1090,11 +1092,13 @@ self.regalloc_mov(argloc, r.r2) self.mc.LG(r.r3, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) - cb = callbuilder.CallBuilder(self, addr, [r.r2, r.r3], r.r2) + descr = self.subject_op.getdescr() + cb = callbuilder.CallBuilder(self, addr, [r.r2, r.r3], r.r2, descr) cb.emit() def _call_assembler_emit_helper_call(self, addr, arglocs, result_loc): - cb = callbuilder.CallBuilder(self, addr, arglocs, result_loc) + descr = self.subject_op.getdescr() + cb = callbuilder.CallBuilder(self, addr, arglocs, result_loc, descr) cb.emit() def _call_assembler_check_descr(self, value, tmploc): From pypy.commits at gmail.com Fri Jan 15 04:49:41 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 01:49:41 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: more than one pool constant did not correctly increment the pool cursor Message-ID: <5698c0b5.6adec20a.1460b.ffffaaca@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81783:fac01ad9537d Date: 2016-01-15 10:48 +0100 http://bitbucket.org/pypy/pypy/changeset/fac01ad9537d/ Log: more than one pool constant did not correctly increment the pool cursor diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -129,19 +129,19 @@ written = 0 if self.constant_64_ones != -1: asm.mc.write('\xFF' * 8) - self.constant_64_ones = self.size + self.constant_64_ones = self.size + written written += 8 if self.constant_64_zeros != -1: asm.mc.write('\x00' * 8) - self.constant_64_zeros = self.size + self.constant_64_zeros = self.size + written written += 8 if self.constant_64_sign_bit != -1: asm.mc.write('\x80' + ('\x00' * 7)) - self.constant_64_sign_bit = self.size + self.constant_64_sign_bit = self.size + written written += 8 if self.constant_max_64_positive != -1: asm.mc.write('\x7F' + ('\xFF' * 7)) - self.constant_max_64_positive = self.size + self.constant_max_64_positive = self.size + written written += 8 self.size += written if not we_are_translated(): From pypy.commits at gmail.com Fri Jan 15 05:05:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 02:05:56 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed 2 translation issues Message-ID: <5698c484.88d31c0a.3ad3.ffffa061@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81784:6d6bbc549ae1 Date: 2016-01-15 11:05 +0100 http://bitbucket.org/pypy/pypy/changeset/6d6bbc549ae1/ Log: fixed 2 translation issues diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -22,7 +22,7 @@ def __init__(self, assembler, fnloc, arglocs, resloc, calldescr): type = INT - size = None + size = WORD self.ressign = True if calldescr is not None: assert isinstance(calldescr, CallDescr) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -266,7 +266,9 @@ # there is no candidate pair that only would # require one spill, thus we need to spill two! # this is a rare case! - reverse_mapping = { reg : var for var, reg in self.reg_bindings.items() } + reverse_mapping = {} + for var, reg in self.reg_bindings.items(): + reverse_mapping[reg] = var # always take the first for i, reg in enumerate(r.MANAGED_REGS): if i % 2 == 1: From pypy.commits at gmail.com Fri Jan 15 05:16:27 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 02:16:27 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: Use a thread-local. Kill a lot of code no longer needed. Message-ID: <5698c6fb.4473c20a.c47af.ffffb603@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81785:00d3221560e3 Date: 2016-01-15 11:15 +0100 http://bitbucket.org/pypy/pypy/changeset/00d3221560e3/ Log: Use a thread-local. Kill a lot of code no longer needed. diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -5,6 +5,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.tool import rffi_platform as platform +from rpython.rlib import rthread from rpython.jit.backend import detect_cpu @@ -70,76 +71,33 @@ def _freeze_(self): return True -def token2lltype(tok): - if tok == 'i': - return lltype.Signed - if tok == 'r': - return llmemory.GCREF - raise NotImplementedError(repr(tok)) -def token2ctype(tok): - if tok == 'i': - return 'long' - elif tok == 'r': - return 'void*' - elif tok == 'f': - return 'double' - else: - raise NotImplementedError(repr(tok)) +# --- copy a few declarations from src/vmprof_stack.h --- -def make_c_trampoline_function(name, func, token, restok): - cont_name = 'rpyvmprof_f_%s_%s' % (name, token) - tramp_name = 'rpyvmprof_t_%s_%s' % (name, token) +VMPROF_CODE_TAG = 1 - func.c_name = cont_name - func._dont_inline_ = True +VMPROFSTACK = lltype.ForwardReference() +PVMPROFSTACK = lltype.Ptr(VMPROFSTACK) +VMPROFSTACK.become(rffi.CStruct("vmprof_stack_s", + ('next', PVMPROFSTACK), + ('value', lltype.Signed), + ('kind', lltype.Signed))) +# ---------- - assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( - "rvmprof only supports x86-64 CPUs for now") - llargs = ", ".join(["%s arg%d" % (token2ctype(x), i) for i, x in - enumerate(token)]) - type = token2ctype(restok) - target = udir.join('module_cache') - target.ensure(dir=1) - argnames = ", ".join(["arg%d" % i for i in range(len(token))]) - vmprof_stack_h = SRC.join("vmprof_stack.h").read() - target = target.join('trampoline_%s_%s.vmprof.c' % (name, token)) - target.write(""" -#include "src/precommondefs.h" -#include "vmprof_stack.h" +vmprof_tl_stack = rthread.ThreadLocalField(PVMPROFSTACK, "vmprof_tl_stack") +do_use_eci = rffi.llexternal_use_eci( + ExternalCompilationInfo(includes=['vmprof_stack.h'], + include_dirs = [SRC])) -extern vmprof_stack* vmprof_global_stack; +def enter_code(unique_id): + do_use_eci() + s = lltype.malloc(VMPROFSTACK, flavor='raw') + s.c_next = vmprof_tl_stack.get_or_make_raw() + s.c_value = unique_id + s.c_kind = VMPROF_CODE_TAG + vmprof_tl_stack.setraw(s) + return s -%(type)s %(cont_name)s(%(llargs)s); - -%(type)s %(tramp_name)s(%(llargs)s, long unique_id) -{ - %(type)s result; - struct vmprof_stack node; - - node.value = unique_id; - node.kind = VMPROF_CODE_TAG; - node.next = vmprof_global_stack; - vmprof_global_stack = &node; - result = %(cont_name)s(%(argnames)s); - vmprof_global_stack = node.next; - return result; -} -""" % locals()) - header = 'RPY_EXTERN %s %s(%s);\n' % ( - token2ctype(restok), tramp_name, - ', '.join([token2ctype(tok) for tok in token] + ['long'])) - - eci = ExternalCompilationInfo( - post_include_bits = [header], - separate_module_files = [str(target)], - ) - eci = eci.merge(global_eci) - ARGS = [token2lltype(tok) for tok in token] + [lltype.Signed] - return rffi.llexternal( - tramp_name, ARGS, - token2lltype(restok), - compilation_info=eci, - _nowrapper=True, sandboxsafe=True, - random_effects_on_gcobjs=True) +def leave_code(s): + vmprof_tl_stack.setraw(s.c_next) diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py --- a/rpython/rlib/rvmprof/rvmprof.py +++ b/rpython/rlib/rvmprof/rvmprof.py @@ -141,12 +141,7 @@ 'get_code_fn(*args)' is called to extract the code object from the arguments given to the decorated function. - The original function can return None, an integer, or an instance. - In the latter case (only), 'result_class' must be set. - - NOTE: for now, this assumes that the decorated functions only takes - instances or plain integer arguments, and at most 5 of them - (including 'self' if applicable). + 'result_class' is ignored (backward compatibility). """ def decorate(func): try: @@ -154,53 +149,19 @@ except cintf.VMProfPlatformUnsupported: return func - if hasattr(func, 'im_self'): - assert func.im_self is None - func = func.im_func - - def lower(*args): - if len(args) == 0: - return (), "" - ll_args, token = lower(*args[1:]) - ll_arg = args[0] - if isinstance(ll_arg, int): - tok = "i" - else: - tok = "r" - ll_arg = cast_instance_to_gcref(ll_arg) - return (ll_arg,) + ll_args, tok + token - - @specialize.memo() - def get_ll_trampoline(token): - """ Used by the trampoline-version only - """ - if result_class is None: - restok = "i" - else: - restok = "r" - return cintf.make_c_trampoline_function(name, func, token, restok) - def decorated_function(*args): - # go through the asm trampoline ONLY if we are translated but not - # being JITted. - # - # If we are not translated, we obviously don't want to go through - # the trampoline because there is no C function it can call. - # # If we are being JITted, we want to skip the trampoline, else the # JIT cannot see through it. - # - if we_are_translated() and not jit.we_are_jitted(): + if not jit.we_are_jitted(): unique_id = get_code_fn(*args)._vmprof_unique_id - ll_args, token = lower(*args) - ll_trampoline = get_ll_trampoline(token) - ll_result = ll_trampoline(*ll_args + (unique_id,)) + x = cintf.enter_code(unique_id) + try: + return func(*args) + finally: + cintf.leave_code(x) else: return func(*args) - if result_class is not None: - return cast_base_ptr_to_instance(result_class, ll_result) - else: - return ll_result + decorated_function.__name__ = func.__name__ + '_rvmprof' return decorated_function diff --git a/rpython/rlib/rvmprof/src/rvmprof.h b/rpython/rlib/rvmprof/src/rvmprof.h --- a/rpython/rlib/rvmprof/src/rvmprof.h +++ b/rpython/rlib/rvmprof/src/rvmprof.h @@ -8,4 +8,3 @@ RPY_EXTERN int vmprof_stack_append(void*, long); RPY_EXTERN long vmprof_stack_pop(void*); RPY_EXTERN void vmprof_stack_free(void*); -RPY_EXTERN void* vmprof_address_of_global_stack(void); \ No newline at end of file diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -48,13 +48,6 @@ -RPY_EXTERN vmprof_stack* vmprof_global_stack; - -RPY_EXTERN void *vmprof_address_of_global_stack(void) -{ - return (void*)&vmprof_global_stack; -} - RPY_EXTERN char *vmprof_init(int fd, double interval, char *interp_name) { @@ -115,8 +108,6 @@ #define VERSION_THREAD_ID '\x01' #define VERSION_TAG '\x02' -vmprof_stack* vmprof_global_stack = NULL; - struct prof_stacktrace_s { char padding[sizeof(long) - 1]; char marker; @@ -135,9 +126,16 @@ * ************************************************************* */ +#include "src/threadlocal.h" + +static vmprof_stack_t *get_vmprof_stack(void) +{ + return RPY_THREADLOCALREF_GET(vmprof_tl_stack); +} + static int get_stack_trace(intptr_t *result, int max_depth, intptr_t pc, ucontext_t *ucontext) { - struct vmprof_stack* stack = vmprof_global_stack; + vmprof_stack_t* stack = get_vmprof_stack(); int n = 0; intptr_t addr = 0; int bottom_jitted = 0; diff --git a/rpython/rlib/rvmprof/src/vmprof_stack.h b/rpython/rlib/rvmprof/src/vmprof_stack.h --- a/rpython/rlib/rvmprof/src/vmprof_stack.h +++ b/rpython/rlib/rvmprof/src/vmprof_stack.h @@ -1,7 +1,7 @@ #include -#define VMPROF_CODE_TAG 1 +#define VMPROF_CODE_TAG 1 /* <- also in cintf.py */ #define VMPROF_BLACKHOLE_TAG 2 #define VMPROF_JITTED_TAG 3 #define VMPROF_JITTING_TAG 4 @@ -9,11 +9,11 @@ #define VMPROF_ASSEMBLER_TAG 6 // whatever we want here -typedef struct vmprof_stack { - struct vmprof_stack* next; +typedef struct vmprof_stack_s { + struct vmprof_stack_s* next; intptr_t value; intptr_t kind; -} vmprof_stack; +} vmprof_stack_t; // the kind is WORD so we consume exactly 3 WORDs and we don't have // to worry too much. There is a potential for squeezing it with bit From pypy.commits at gmail.com Fri Jan 15 05:17:01 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 02:17:01 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: Fix massive leak :-) Message-ID: <5698c71d.465fc20a.54af0.ffffb139@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81786:e6e35c5fb24e Date: 2016-01-15 11:16 +0100 http://bitbucket.org/pypy/pypy/changeset/e6e35c5fb24e/ Log: Fix massive leak :-) diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -101,3 +101,4 @@ def leave_code(s): vmprof_tl_stack.setraw(s.c_next) + lltype.free(s, flavor='raw') From pypy.commits at gmail.com Fri Jan 15 05:26:55 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 02:26:55 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: ready to merge Message-ID: <5698c96f.8205c20a.ca9cc.ffffb29e@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2584:29d44ff276f1 Date: 2016-01-15 11:24 +0100 http://bitbucket.org/cffi/cffi/changeset/29d44ff276f1/ Log: ready to merge From pypy.commits at gmail.com Fri Jan 15 05:26:57 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 02:26:57 -0800 (PST) Subject: [pypy-commit] cffi default: hg merge static-callback-embedding Message-ID: <5698c971.2a06c20a.ab727.ffffbb4f@mx.google.com> Author: Armin Rigo Branch: Changeset: r2585:fc7471ccde10 Date: 2016-01-15 11:26 +0100 http://bitbucket.org/cffi/cffi/changeset/fc7471ccde10/ Log: hg merge static-callback-embedding Embedding! diff too long, truncating to 2000 out of 2798 lines diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6354,7 +6354,7 @@ #endif struct _cffi_externpy_s; /* forward declaration */ -static void _cffi_call_python(struct _cffi_externpy_s *, char *args); +static void cffi_call_python(struct _cffi_externpy_s *, char *args); static void *cffi_exports[] = { NULL, @@ -6387,7 +6387,7 @@ _cffi_to_c__Bool, _prepare_pointer_call_argument, convert_array_from_object, - _cffi_call_python, + cffi_call_python, }; static struct { const char *name; int value; } all_dlopen_flags[] = { diff --git a/c/call_python.c b/c/call_python.c --- a/c/call_python.c +++ b/c/call_python.c @@ -94,7 +94,7 @@ return NULL; /* force _update_cache_to_call_python() to be called the next time - the C function invokes _cffi_call_python, to update the cache */ + the C function invokes cffi_call_python, to update the cache */ old1 = externpy->reserved1; externpy->reserved1 = Py_None; /* a non-NULL value */ Py_INCREF(Py_None); @@ -143,7 +143,15 @@ return 2; /* out of memory? */ } -static void _cffi_call_python(struct _cffi_externpy_s *externpy, char *args) +#if (defined(WITH_THREAD) && !defined(_MSC_VER) && \ + !defined(__amd64__) && !defined(__x86_64__) && \ + !defined(__i386__) && !defined(__i386)) +# define read_barrier() __sync_synchronize() +#else +# define read_barrier() (void)0 +#endif + +static void cffi_call_python(struct _cffi_externpy_s *externpy, char *args) { /* Invoked by the helpers generated from extern "Python" in the cdef. @@ -164,6 +172,21 @@ at least 8 bytes in size. */ int err = 0; + + /* This read barrier is needed for _embedding.h. It is paired + with the write_barrier() there. Without this barrier, we can + in theory see the following situation: the Python + initialization code already ran (in another thread), and the + '_cffi_call_python' function pointer directed execution here; + but any number of other data could still be seen as + uninitialized below. For example, 'externpy' would still + contain NULLs even though it was correctly set up, or + 'interpreter_lock' (the GIL inside CPython) would still be seen + as NULL, or 'autoInterpreterState' (used by + PyGILState_Ensure()) would be NULL or contain bogus fields. + */ + read_barrier(); + save_errno(); /* We need the infotuple here. We could always go through diff --git a/c/cffi1_module.c b/c/cffi1_module.c --- a/c/cffi1_module.c +++ b/c/cffi1_module.c @@ -3,7 +3,7 @@ #include "realize_c_type.c" #define CFFI_VERSION_MIN 0x2601 -#define CFFI_VERSION_MAX 0x26FF +#define CFFI_VERSION_MAX 0x27FF typedef struct FFIObject_s FFIObject; typedef struct LibObject_s LibObject; @@ -214,5 +214,12 @@ (PyObject *)lib) < 0) return NULL; +#if PY_MAJOR_VERSION >= 3 + /* add manually 'module_name' in sys.modules: it seems that + Py_InitModule() is not enough to do that */ + if (PyDict_SetItemString(modules_dict, module_name, m) < 0) + return NULL; +#endif + return m; } diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.3", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.4.3" +__version_info__ = (1, 4, 3) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/cffi/_cffi_include.h b/cffi/_cffi_include.h --- a/cffi/_cffi_include.h +++ b/cffi/_cffi_include.h @@ -146,8 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 #define _cffi_call_python \ - ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -206,7 +207,8 @@ /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org #endif diff --git a/cffi/_embedding.h b/cffi/_embedding.h new file mode 100644 --- /dev/null +++ b/cffi/_embedding.h @@ -0,0 +1,517 @@ + +/***** Support code for embedding *****/ + +#if defined(_MSC_VER) +# define CFFI_DLLEXPORT __declspec(dllexport) +#elif defined(__GNUC__) +# define CFFI_DLLEXPORT __attribute__((visibility("default"))) +#else +# define CFFI_DLLEXPORT /* nothing */ +#endif + + +/* There are two global variables of type _cffi_call_python_fnptr: + + * _cffi_call_python, which we declare just below, is the one called + by ``extern "Python"`` implementations. + + * _cffi_call_python_org, which on CPython is actually part of the + _cffi_exports[] array, is the function pointer copied from + _cffi_backend. + + After initialization is complete, both are equal. However, the + first one remains equal to &_cffi_start_and_call_python until the + very end of initialization, when we are (or should be) sure that + concurrent threads also see a completely initialized world, and + only then is it changed. +*/ +#undef _cffi_call_python +typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *); +static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *); +static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python; + + +#ifndef _MSC_VER + /* --- Assuming a GCC not infinitely old --- */ +# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n) +# define cffi_write_barrier() __sync_synchronize() +# if !defined(__amd64__) && !defined(__x86_64__) && \ + !defined(__i386__) && !defined(__i386) +# define cffi_read_barrier() __sync_synchronize() +# else +# define cffi_read_barrier() (void)0 +# endif +#else + /* --- Windows threads version --- */ +# include +# define cffi_compare_and_swap(l,o,n) \ + (InterlockedCompareExchangePointer(l,n,o) == (o)) +# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) +# define cffi_read_barrier() (void)0 +static volatile LONG _cffi_dummy; +#endif + +#ifdef WITH_THREAD +# ifndef _MSC_VER +# include + static pthread_mutex_t _cffi_embed_startup_lock; +# else + static CRITICAL_SECTION _cffi_embed_startup_lock; +# endif + static char _cffi_embed_startup_lock_ready = 0; +#endif + +static void _cffi_acquire_reentrant_mutex(void) +{ + static void *volatile lock = NULL; + + while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: pthread_mutex_init() should be very fast, and + this is only run at start-up anyway. */ + } + +#ifdef WITH_THREAD + if (!_cffi_embed_startup_lock_ready) { +# ifndef _MSC_VER + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&_cffi_embed_startup_lock, &attr); +# else + InitializeCriticalSection(&_cffi_embed_startup_lock); +# endif + _cffi_embed_startup_lock_ready = 1; + } +#endif + + while (!cffi_compare_and_swap(&lock, (void *)1, NULL)) + ; + +#ifndef _MSC_VER + pthread_mutex_lock(&_cffi_embed_startup_lock); +#else + EnterCriticalSection(&_cffi_embed_startup_lock); +#endif +} + +static void _cffi_release_reentrant_mutex(void) +{ +#ifndef _MSC_VER + pthread_mutex_unlock(&_cffi_embed_startup_lock); +#else + LeaveCriticalSection(&_cffi_embed_startup_lock); +#endif +} + + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + + +#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */ + +static void _cffi_py_initialize(void) +{ + /* XXX use initsigs=0, which "skips initialization registration of + signal handlers, which might be useful when Python is + embedded" according to the Python docs. But review and think + if it should be a user-controllable setting. + + XXX we should also give a way to write errors to a buffer + instead of to stderr. + + XXX if importing 'site' fails, CPython (any version) calls + exit(). Should we try to work around this behavior here? + */ + Py_InitializeEx(0); +} + +static int _cffi_initialize_python(void) +{ + /* This initializes Python, imports _cffi_backend, and then the + present .dll/.so is set up as a CPython C extension module. + */ + int result; + PyGILState_STATE state; + PyObject *pycode=NULL, *global_dict=NULL, *x; + +#if PY_MAJOR_VERSION >= 3 + /* see comments in _cffi_carefully_make_gil() about the + Python2/Python3 difference + */ +#else + /* Acquire the GIL. We have no threadstate here. If Python is + already initialized, it is possible that there is already one + existing for this thread, but it is not made current now. + */ + PyEval_AcquireLock(); + + _cffi_py_initialize(); + + /* The Py_InitializeEx() sometimes made a threadstate for us, but + not always. Indeed Py_InitializeEx() could be called and do + nothing. So do we have a threadstate, or not? We don't know, + but we can replace it with NULL in all cases. + */ + (void)PyThreadState_Swap(NULL); + + /* Now we can release the GIL and re-acquire immediately using the + logic of PyGILState(), which handles making or installing the + correct threadstate. + */ + PyEval_ReleaseLock(); +#endif + state = PyGILState_Ensure(); + + /* Call the initxxx() function from the present module. It will + create and initialize us as a CPython extension module, instead + of letting the startup Python code do it---it might reimport + the same .dll/.so and get maybe confused on some platforms. + It might also have troubles locating the .dll/.so again for all + I know. + */ + (void)_CFFI_PYTHON_STARTUP_FUNC(); + if (PyErr_Occurred()) + goto error; + + /* Now run the Python code provided to ffi.embedding_init_code(). + */ + pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE, + "", + Py_file_input); + if (pycode == NULL) + goto error; + global_dict = PyDict_New(); + if (global_dict == NULL) + goto error; + if (PyDict_SetItemString(global_dict, "__builtins__", + PyThreadState_GET()->interp->builtins) < 0) + goto error; + x = PyEval_EvalCode( +#if PY_MAJOR_VERSION < 3 + (PyCodeObject *) +#endif + pycode, global_dict, global_dict); + if (x == NULL) + goto error; + Py_DECREF(x); + + /* Done! Now if we've been called from + _cffi_start_and_call_python() in an ``extern "Python"``, we can + only hope that the Python code did correctly set up the + corresponding @ffi.def_extern() function. Otherwise, the + general logic of ``extern "Python"`` functions (inside the + _cffi_backend module) will find that the reference is still + missing and print an error. + */ + result = 0; + done: + Py_XDECREF(pycode); + Py_XDECREF(global_dict); + PyGILState_Release(state); + return result; + + error:; + { + /* Print as much information as potentially useful. + Debugging load-time failures with embedding is not fun + */ + PyObject *exception, *v, *tb, *f, *modules, *mod; + PyErr_Fetch(&exception, &v, &tb); + if (exception != NULL) { + PyErr_NormalizeException(&exception, &v, &tb); + PyErr_Display(exception, v, tb); + } + Py_XDECREF(exception); + Py_XDECREF(v); + Py_XDECREF(tb); + + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME + "\ncompiled with cffi version: 1.4.3" + "\n_cffi_backend module: ", f); + modules = PyImport_GetModuleDict(); + mod = PyDict_GetItemString(modules, "_cffi_backend"); + if (mod == NULL) { + PyFile_WriteString("not loaded", f); + } + else { + v = PyObject_GetAttrString(mod, "__file__"); + PyFile_WriteObject(v, f, 0); + Py_XDECREF(v); + } + PyFile_WriteString("\nsys.path: ", f); + PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); + PyFile_WriteString("\n\n", f); + } + } + result = -1; + goto done; +} + +PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */ + +static int _cffi_carefully_make_gil(void) +{ + /* This does the basic initialization of Python. It can be called + completely concurrently from unrelated threads. It assumes + that we don't hold the GIL before (if it exists), and we don't + hold it afterwards. + + What it really does is completely different in Python 2 and + Python 3. + + Python 2 + ======== + + Initialize the GIL, without initializing the rest of Python, + by calling PyEval_InitThreads(). + + PyEval_InitThreads() must not be called concurrently at all. + So we use a global variable as a simple spin lock. This global + variable must be from 'libpythonX.Y.so', not from this + cffi-based extension module, because it must be shared from + different cffi-based extension modules. We choose + _PyParser_TokenNames[0] as a completely arbitrary pointer value + that is never written to. The default is to point to the + string "ENDMARKER". We change it temporarily to point to the + next character in that string. (Yes, I know it's REALLY + obscure.) + + Python 3 + ======== + + In Python 3, PyEval_InitThreads() cannot be called before + Py_InitializeEx() any more. So this function calls + Py_InitializeEx() first. It uses the same obscure logic to + make sure we never call it concurrently. + + Arguably, this is less good on the spinlock, because + Py_InitializeEx() takes much longer to run than + PyEval_InitThreads(). But I didn't find a way around it. + */ + +#ifdef WITH_THREAD + char *volatile *lock = (char *volatile *)_PyParser_TokenNames; + char *old_value; + + while (1) { /* spin loop */ + old_value = *lock; + if (old_value[0] == 'E') { + assert(old_value[1] == 'N'); + if (cffi_compare_and_swap(lock, old_value, old_value + 1)) + break; + } + else { + assert(old_value[0] == 'N'); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +#endif + +#if PY_MAJOR_VERSION >= 3 + /* Python 3: call Py_InitializeEx() */ + { + PyGILState_STATE state = PyGILState_UNLOCKED; + if (!Py_IsInitialized()) + _cffi_py_initialize(); + else + state = PyGILState_Ensure(); + + PyEval_InitThreads(); + PyGILState_Release(state); + } +#else + /* Python 2: call PyEval_InitThreads() */ +# ifdef WITH_THREAD + if (!PyEval_ThreadsInitialized()) { + PyEval_InitThreads(); /* makes the GIL */ + PyEval_ReleaseLock(); /* then release it */ + } + /* else: there is already a GIL, but we still needed to do the + spinlock dance to make sure that we see it as fully ready */ +# endif +#endif + +#ifdef WITH_THREAD + /* release the lock */ + while (!cffi_compare_and_swap(lock, old_value + 1, old_value)) + ; +#endif + + return 0; +} + +/********** end CPython-specific section **********/ + + +#else + + +/********** PyPy-specific section **********/ + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */ + +static struct _cffi_pypy_init_s { + const char *name; + void (*func)(const void *[]); + const char *code; +} _cffi_pypy_init = { + _CFFI_MODULE_NAME, + _CFFI_PYTHON_STARTUP_FUNC, + _CFFI_PYTHON_STARTUP_CODE, +}; + +extern int pypy_carefully_make_gil(const char *); +extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *); + +static int _cffi_carefully_make_gil(void) +{ + return pypy_carefully_make_gil(_CFFI_MODULE_NAME); +} + +static int _cffi_initialize_python(void) +{ + return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init); +} + +/********** end PyPy-specific section **********/ + + +#endif + + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +static _cffi_call_python_fnptr _cffi_start_python(void) +{ + /* Delicate logic to initialize Python. This function can be + called multiple times concurrently, e.g. when the process calls + its first ``extern "Python"`` functions in multiple threads at + once. It can also be called recursively, in which case we must + ignore it. We also have to consider what occurs if several + different cffi-based extensions reach this code in parallel + threads---it is a different copy of the code, then, and we + can't have any shared global variable unless it comes from + 'libpythonX.Y.so'. + + Idea: + + * _cffi_carefully_make_gil(): "carefully" call + PyEval_InitThreads() (possibly with Py_InitializeEx() first). + + * then we use a (local) custom lock to make sure that a call to this + cffi-based extension will wait if another call to the *same* + extension is running the initialization in another thread. + It is reentrant, so that a recursive call will not block, but + only one from a different thread. + + * then we grab the GIL and (Python 2) we call Py_InitializeEx(). + At this point, concurrent calls to Py_InitializeEx() are not + possible: we have the GIL. + + * do the rest of the specific initialization, which may + temporarily release the GIL but not the custom lock. + Only release the custom lock when we are done. + */ + static char called = 0; + + if (_cffi_carefully_make_gil() != 0) + return NULL; + + _cffi_acquire_reentrant_mutex(); + + /* Here the GIL exists, but we don't have it. We're only protected + from concurrency by the reentrant mutex. */ + + /* This file only initializes the embedded module once, the first + time this is called, even if there are subinterpreters. */ + if (!called) { + called = 1; /* invoke _cffi_initialize_python() only once, + but don't set '_cffi_call_python' right now, + otherwise concurrent threads won't call + this function at all (we need them to wait) */ + if (_cffi_initialize_python() == 0) { + /* now initialization is finished. Switch to the fast-path. */ + + /* We would like nobody to see the new value of + '_cffi_call_python' without also seeing the rest of the + data initialized. However, this is not possible. But + the new value of '_cffi_call_python' is the function + 'cffi_call_python()' from _cffi_backend. So: */ + cffi_write_barrier(); + /* ^^^ we put a write barrier here, and a corresponding + read barrier at the start of cffi_call_python(). This + ensures that after that read barrier, we see everything + done here before the write barrier. + */ + + assert(_cffi_call_python_org != NULL); + _cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org; + } + else { + /* initialization failed. Reset this to NULL, even if it was + already set to some other value. Future calls to + _cffi_start_python() are still forced to occur, and will + always return NULL from now on. */ + _cffi_call_python_org = NULL; + } + } + + _cffi_release_reentrant_mutex(); + + return (_cffi_call_python_fnptr)_cffi_call_python_org; +} + +static +void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args) +{ + _cffi_call_python_fnptr fnptr; + int current_err = errno; +#ifdef _MSC_VER + int current_lasterr = GetLastError(); +#endif + fnptr = _cffi_start_python(); + if (fnptr == NULL) { + fprintf(stderr, "function %s() called, but initialization code " + "failed. Returning 0.\n", externpy->name); + memset(args, 0, externpy->size_of_result); + } +#ifdef _MSC_VER + SetLastError(current_lasterr); +#endif + errno = current_err; + + if (fnptr != NULL) + fnptr(externpy, args); +} + + +/* The cffi_start_python() function makes sure Python is initialized + and our cffi module is set up. It can be called manually from the + user C code. The same effect is obtained automatically from any + dll-exported ``extern "Python"`` function. This function returns + -1 if initialization failed, 0 if all is OK. */ +_CFFI_UNUSED_FN +static int cffi_start_python(void) +{ + if (_cffi_call_python == &_cffi_start_and_call_python) { + if (_cffi_start_python() == NULL) + return -1; + } + cffi_read_barrier(); + return 0; +} + +#undef cffi_compare_and_swap +#undef cffi_write_barrier +#undef cffi_read_barrier diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -74,6 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -101,13 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed) + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -533,6 +542,25 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + if '__pypy__' in sys.builtin_module_names: + pythonlib = "pypy-c" + else: + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + libraries = kwds.get('libraries', []) + if pythonlib not in libraries: + kwds['libraries'] = libraries + [pythonlib] + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " @@ -592,14 +620,23 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0): + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, + target=target, source_extension=source_extension, compiler_verbose=verbose, **kwds) def init_once(self, func, tag): @@ -626,6 +663,32 @@ self._init_once_cache[tag] = (True, result) return result + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -220,8 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._override = False - self._packed = False + self._options = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -281,16 +280,15 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): - prev_override = self._override - prev_packed = self._packed + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options try: - self._override = override - self._packed = packed + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} self._internal_parse(csource) finally: - self._override = prev_override - self._packed = prev_packed + self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -376,10 +374,13 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._inside_extern_python: - self._declare('extern_python ' + decl.name, tp) + if self._options['dllexport']: + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' else: - self._declare('function ' + decl.name, tp) + tag = 'function ' + self._declare(tag + decl.name, tp) def _parse_decl(self, decl): node = decl.type @@ -449,7 +450,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._override: + if not self._options['override']: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -728,7 +729,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._packed + tp.packed = self._options['packed'] if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -21,12 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, + target_extension, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +38,32 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _save_val(name): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + return config_vars.get(name, Ellipsis) + +def _restore_val(name, value): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + config_vars[name] = value + if value is Ellipsis: + del config_vars[name] + +def _win32_hack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): + MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ + MSVCCompiler._remove_visual_c_ref + MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file + +def _win32_unhack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + MSVCCompiler._remove_visual_c_ref = \ + MSVCCompiler._remove_visual_c_ref_CFFI_BAK + +def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -49,18 +76,29 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: + if sys.platform == 'win32' and embedding: + _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 + old_SO = _save_val('SO') + old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extension is not None: + _restore_val('SO', target_extension) + _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) + _restore_val('SO', old_SO) + _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) + if sys.platform == 'win32' and embedding: + _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -281,6 +282,29 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % + (self._string_literal(self.ffi._embedding),)) + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION + # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() @@ -365,17 +389,16 @@ prnt() # # the init function - base_module_name = self.module_name.split('.')[-1] prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') if self._num_externpy: prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python = ' + prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % VERSION) + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -394,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') @@ -1123,7 +1146,10 @@ assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) - def _generate_cpy_extern_python_decl(self, tp, name): + def _generate_cpy_dllexport_python_collecttype(self, tp, name): + self._generate_cpy_extern_python_collecttype(tp, name) + + def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1156,7 +1182,11 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + if dllexport: + tag = 'CFFI_DLLEXPORT' + else: + tag = 'static' + prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1174,6 +1204,9 @@ prnt() self._num_externpy += 1 + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: raise ffiplatform.VerificationError( @@ -1185,6 +1218,21 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + def _generate_cpy_dllexport_python_ctx(self, tp, name): + self._generate_cpy_extern_python_ctx(tp, name) + + def _string_literal(self, s): + def _char_repr(c): + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' + if c in '\\"?': return '\\' + c + if ' ' <= c < '\x7F': return c + if c == '\n': return '\\n' + return '\\%03o' % ord(c) + lines = [] + for line in s.splitlines(True): + lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) + return ' \\\n'.join(lines) + # ---------- # emitting the opcodes for individual types @@ -1311,12 +1359,15 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, **kwds): + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) @@ -1325,13 +1376,40 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + if target == '*': + target_module_name = module_name + target_extension = None # use default + else: + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + # split along the first '.' (not the last one, otherwise the + # preceeding dots are interpreted as splitting package names) + index = target.find('.') + if index < 0: + raise ValueError("target argument %r should be a file name " + "containing a '.'" % (target,)) + target_module_name = target[:index] + target_extension = target[index:] + # + ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose, + target_extension, + embedding=embedding) finally: os.chdir(cwd) return outputfilename diff --git a/demo/embedding.py b/demo/embedding.py new file mode 100644 --- /dev/null +++ b/demo/embedding.py @@ -0,0 +1,21 @@ +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add(int, int); +""") + +ffi.embedding_init_code(""" + from _embedding_cffi import ffi + print("preparing") # printed once + + @ffi.def_extern() + def add(x, y): + print("adding %d and %d" % (x, y)) + return x + y +""") + +ffi.set_source("_embedding_cffi", "") + +ffi.compile(verbose=True) diff --git a/demo/embedding_test.c b/demo/embedding_test.c new file mode 100644 --- /dev/null +++ b/demo/embedding_test.c @@ -0,0 +1,19 @@ +/* Link this program with libembedding_test.so. + E.g. with gcc: + + gcc -o embedding_test embedding_test.c _embedding_cffi*.so +*/ + +#include + +extern int add(int x, int y); + + +int main(void) +{ + int res = add(40, 2); + printf("result: %d\n", res); + res = add(100, -5); + printf("result: %d\n", res); + return 0; +} diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -138,6 +138,8 @@ for ``lib.__class__`` before version 1.4. +.. _cdef: + ffi.cdef(): declaring types and functions ----------------------------------------- diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.4.2' +release = '1.4.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst new file mode 100644 --- /dev/null +++ b/doc/source/embedding.rst @@ -0,0 +1,369 @@ +================================ +Using CFFI for embedding +================================ + +.. contents:: + +You can use CFFI to generate a ``.so/.dll`` which exports the API of +your choice to any C application that wants to link with this +``.so/.dll``. + +The general idea is as follows: + +* You write and execute a Python script, which produces a ``.so/.dll`` + file with the API of your choice. The script also gives some Python + code to be "frozen" inside the ``.so``. + +* At runtime, the C application loads this ``.so/.dll`` without having + to know that it was produced by Python and CFFI. + +* The first time a C function is called, Python is initialized and + the frozen Python code is executed. + +* The frozen Python code attaches Python functions that implement the + C functions of your API, which are then used for all subsequent C + function calls. + +One of the goals of this approach is to be entirely independent from +the CPython C API: no ``Py_Initialize()`` nor ``PyRun_SimpleString()`` +nor even ``PyObject``. It works identically on CPython and PyPy. + +.. note:: PyPy release 4.0.1 contains CFFI 1.4 only. + +This is entirely *new in version 1.5.* + + +Usage +----- + +.. __: overview.html#embedding + +See the `paragraph in the overview page`__ for a quick introduction. +In this section, we explain every step in more details. We will use +here this slightly expanded example: + +.. code-block:: c + + /* file plugin.h */ + typedef struct { int x, y; } point_t; + extern int do_stuff(point_t *); + +.. code-block:: python + + # file plugin_build.py + import cffi + ffi = cffi.FFI() + + with open('plugin.h') as f: + ffi.embedding_api(f.read()) + + ffi.set_source("my_plugin", ''' + #include "plugin.h" + ''') + + ffi.embedding_init_code(""" + from my_plugin import ffi + + @ffi.def_extern() + def do_stuff(p): + print("adding %d and %d" % (p.x, p.y)) + return p.x + p.y + """) + + ffi.compile(target="plugin-1.5.*", verbose=True) + +Running the code above produces a *DLL*, i,e, a dynamically-loadable +library. It is a file with the extension ``.dll`` on Windows or +``.so`` on other platforms. As usual, it is produced by generating +some intermediate ``.c`` code and then calling the regular +platform-specific C compiler. + +Here are some details about the methods used above: + +* **ffi.embedding_api(source):** parses the given C source, which + declares functions that you want to be exported by the DLL. It can + also declare types, constants and global variables that are part of + the C-level API of your DLL. + + The functions that are found in ``source`` will be automatically + defined in the ``.c`` file: they will contain code that initializes + the Python interpreter the first time any of them is called, + followed by code to call the attached Python function (with + ``@ffi.def_extern()``, see next point). + + The global variables, on the other hand, are not automatically + produced. You have to write their definition explicitly in + ``ffi.set_source()``, as regular C code (see the point after next). + +* **ffi.embedding_init_code(python_code):** this gives + initialization-time Python source code. This code is copied + ("frozen") inside the DLL. At runtime, the code is executed when + the DLL is first initialized, just after Python itself is + initialized. This newly initialized Python interpreter has got an + extra "built-in" module that can be loaded magically without + accessing any files, with a line like "``from my_plugin import ffi, + lib``". The name ``my_plugin`` comes from the first argument to + ``ffi.set_source()``. This module represents "the caller's C world" + from the point of view of Python. + + The initialization-time Python code can import other modules or + packages as usual. You may have typical Python issues like needing + to set up ``sys.path`` somehow manually first. + + For every function declared within ``ffi.embedding_api()``, the + initialization-time Python code or one of the modules it imports + should use the decorator ``@ffi.def_extern()`` to attach a + corresponding Python function to it. + + If the initialization-time Python code fails with an exception, then + you get a traceback printed to stderr, along with more information + to help you identify problems like wrong ``sys.path``. If some + function remains unattached at the time where the C code tries to + call it, an error message is also printed to stderr and the function + returns zero/null. + + Note that the CFFI module never calls ``exit()``, but CPython itself + contains code that calls ``exit()``, for example if importing + ``site`` fails. This may be worked around in the future. + +* **ffi.set_source(c_module_name, c_code):** set the name of the + module from Python's point of view. It also gives more C code which + will be included in the generated C code. In trivial examples it + can be an empty string. It is where you would ``#include`` some + other files, define global variables, and so on. The macro + ``CFFI_DLLEXPORT`` is available to this C code: it expands to the + platform-specific way of saying "the following declaration should be + exported from the DLL". For example, you would put "``extern int + my_glob;``" in ``ffi.embedding_api()`` and "``CFFI_DLLEXPORT int + my_glob = 42;``" in ``ffi.set_source()``. + + Currently, any *type* declared in ``ffi.embedding_api()`` must also + be present in the ``c_code``. This is automatic if this code + contains a line like ``#include "plugin.h"`` in the example above. + +* **ffi.compile([target=...] [, verbose=True]):** make the C code and + compile it. By default, it produces a file called + ``c_module_name.dll`` or ``c_module_name.so``, but the default can + be changed with the optional ``target`` keyword argument. You can + use ``target="foo.*"`` with a literal ``*`` to ask for a file called + ``foo.dll`` on Windows or ``foo.so`` elsewhere. One reason for + specifying an alternate ``target`` is to include characters not + usually allowed in Python module names, like "``plugin-1.5.*``". + + For more complicated cases, you can call instead + ``ffi.emit_c_code("foo.c")`` and compile the resulting ``foo.c`` + file using other means. CFFI's compilation logic is based on the + standard library ``distutils`` package, which is really developed + and tested for the purpose of making CPython extension modules, not + other DLLs. + + +More reading +------------ + +If you're reading this page about embedding and you are not familiar +with CFFI already, here are a few pointers to what you could read +next: + +* For the ``@ffi.def_extern()`` functions, integer C types are passed + simply as Python integers; and simple pointers-to-struct and basic + arrays are all straightforward enough. However, sooner or later you + will need to read about this topic in more details here__. + +* ``@ffi.def_extern()``: see `documentation here,`__ notably on what + happens if the Python function raises an exception. + +* To create Python objects attached to C data, one common solution is + to use ``ffi.new_handle()``. See documentation here__. + +* In embedding mode, the major direction is C code that calls Python + functions. This is the opposite of the regular extending mode of + CFFI, in which the major direction is Python code calling C. That's + why the page `Using the ffi/lib objects`_ talks first about the + latter, and why the direction "C code that calls Python" is + generally referred to as "callbacks" in that page. If you also + need to have your Python code call C code, read more about + `Embedding and Extending`_ below. + +* ``ffi.embedding_api(source)``: follows the same syntax as + ``ffi.cdef()``, `documented here.`__ You can use the "``...``" + syntax as well, although in practice it may be less useful than it + is for ``cdef()``. On the other hand, it is expected that often the + C sources that you need to give to ``ffi.embedding_api()`` would be + exactly the same as the content of some ``.h`` file that you want to + give to users of your DLL. That's why the example above does this:: + + with open('foo.h') as f: + ffi.embedding(f.read()) + + Note that a drawback of this approach is that ``ffi.embedding()`` + doesn't support ``#ifdef`` directives. You may have to use a more + convoluted expression like:: + + with open('foo.h') as f: + lines = [line for line in f if not line.startswith('#')] + ffi.embedding(''.join(lines)) + + As in the example above, you can also use the same ``foo.h`` from + ``ffi.set_source()``:: + + ffi.set_source('module_name', '#include "foo.h"') + + +.. __: using.html#working +.. __: using.html#def-extern +.. __: using.html#ffi-new_handle +.. __: cdef.html#cdef + +.. _`Using the ffi/lib objects`: using.html + + +Troubleshooting +--------------- + +The error message + + cffi extension module 'c_module_name' has unknown version 0x2701 + +means that the running Python interpreter located a CFFI version older +than 1.5. CFFI 1.5 or newer must be installed in the running Python. + + +Using multiple CFFI-made DLLs +----------------------------- + +Multiple CFFI-made DLLs can be used by the same process. + +Note that all CFFI-made DLLs in a process share a single Python +interpreter. The effect is the same as the one you get by trying to +build a large Python application by assembling a lot of unrelated +packages. Some of these might be libraries that monkey-patch some +functions from the standard library, for example, which might be +unexpected from other parts. + + +Multithreading +-------------- + +Multithreading should work transparently, based on Python's standard +Global Interpreter Lock. + +If two threads both try to call a C function when Python is not yet +initialized, then locking occurs. One thread proceeds with +initialization and blocks the other thread. The other thread will be +allowed to continue only when the execution of the initialization-time +Python code is done. + +If the two threads call two *different* CFFI-made DLLs, the Python +initialization itself will still be serialized, but the two pieces of +initialization-time Python code will not. The idea is that there is a +priori no reason for one DLL to wait for initialization of the other +DLL to be complete. + +After initialization, Python's standard Global Interpreter Lock kicks +in. The end result is that when one CPU progresses on executing +Python code, no other CPU can progress on executing more Python code +from another thread of the same process. At regular intervals, the +lock switches to a different thread, so that no single thread should +appear to block indefinitely. + + +Testing +------- + +For testing purposes, a CFFI-made DLL can be imported in a running +Python interpreter instead of being loaded like a C shared library. + +You might have some issues with the file name: for example, on +Windows, Python expects the file to be called ``c_module_name.pyd``, +but the CFFI-made DLL is called ``target.dll`` instead. The base name +``target`` is the one specified in ``ffi.compile()``, and on Windows +the extension is ``.dll`` instead of ``.pyd``. You have to rename or +copy the file, or on POSIX use a symlink. + +The module then works like a regular CFFI extension module. It is +imported with "``from c_module_name import ffi, lib``" and exposes on +the ``lib`` object all C functions. You can test it by calling these +C functions. The initialization-time Python code frozen inside the +DLL is executed the first time such a call is done. + + +Embedding and Extending +----------------------- + +The embedding mode is not incompatible with the non-embedding mode of +CFFI. + +You can use *both* ``ffi.embedding_api()`` and ``ffi.cdef()`` in the +same build script. You put in the former the declarations you want to +be exported by the DLL; you put in the latter only the C functions and +types that you want to share between C and Python, but not export from +the DLL. + +As an example of that, consider the case where you would like to have +a DLL-exported C function written in C directly, maybe to handle some +cases before calling Python functions. To do that, you must *not* put +the function's signature in ``ffi.embedding_api()``. (Note that this +requires more hacks if you use ``ffi.embedding(f.read())``.) You must +only write the custom function definition in ``ffi.set_source()``, and +prefix it with the macro CFFI_DLLEXPORT: + +.. code-block:: c + + CFFI_DLLEXPORT int myfunc(int a, int b) + { + /* implementation here */ + } + +This function can, if it wants, invoke Python functions using the +general mechanism of "callbacks"---called this way because it is a +call from C to Python, although in this case it is not calling +anything back: + +.. code-block:: python + + ffi.cdef(""" + extern "Python" int mycb(int); + """) + + ffi.set_source("my_plugin", """ + + static int mycb(int); /* the callback: forward declaration, to make + it accessible from the C code that follows */ + + CFFI_DLLEXPORT int myfunc(int a, int b) + { + int product = a * b; /* some custom C code */ + return mycb(product); + } + """) + +and then the Python initialization code needs to contain the lines: + +.. code-block:: python + + @ffi.def_extern() + def mycb(x): + print "hi, I'm called with x =", x + return x * 10 + +This ``@ffi.def_extern`` is attaching a Python function to the C +callback ``mycb()``, which in this case is not exported from the DLL. +Nevertheless, the automatic initialization of Python occurs when +``mycb()`` is called, if it happens to be the first function called +from C. More precisely, it does not happen when ``myfunc()`` is +called: this is just a C function, with no extra code magically +inserted around it. It only happens when ``myfunc()`` calls +``mycb()``. + +As the above explanation hints, this is how ``ffi.embedding_api()`` +actually implements function calls that directly invoke Python code; +here, we have merely decomposed it explicitly, in order to add some +custom C code in the middle. + +In case you need to force, from C code, Python to be initialized +before the first ``@ffi.def_extern()`` is called, you can do so by +calling the C function ``cffi_start_python()`` with no argument. It +returns an integer, 0 or -1, to tell if the initialization succeeded +or not. Currently there is no way to prevent a failing initialization +from also dumping a traceback and more information to stderr. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -18,6 +18,7 @@ overview using cdef + embedding Goals diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,11 +51,11 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.2.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.3.tar.gz - - MD5: 81357fe5042d00650b85b728cc181df2 + - MD5: ... - - SHA: 76cff6f1ff5bfb2b9c6c8e2cfa8bf90b5c944394 + - SHA: ... * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/doc/source/overview.rst b/doc/source/overview.rst --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -287,6 +287,54 @@ distributed in precompiled form like any other extension module.* +.. _embedding: + +Embedding +--------- + +*New in version 1.5.* + +CFFI can be used for embedding__: creating a standard +dynamically-linked library (``.dll`` under Windows, ``.so`` elsewhere) +which can be used from a C application. + +.. code-block:: python + + import cffi + ffi = cffi.FFI() + + ffi.embedding_api(""" + int do_stuff(int, int); + """) + + ffi.set_source("my_plugin", "") + + ffi.embedding_init_code(""" + from my_plugin import ffi + + @ffi.def_extern() + def do_stuff(x, y): + print("adding %d and %d" % (x, y)) + return x + y + """) + + ffi.compile(target="plugin-1.5.*", verbose=True) + +This simple example creates ``plugin-1.5.dll`` or ``plugin-1.5.so`` as +a DLL with a single exported function, ``do_stuff()``. You execute +the script above once, with the interpreter you want to have +internally used; it can be CPython 2.x or 3.x or PyPy. This DLL can +then be used "as usual" from an application; the application doesn't +need to know that it is talking with a library made with Python and +CFFI. At runtime, when the application calls ``int do_stuff(int, +int)``, the Python interpreter is automatically initialized and ``def +do_stuff(x, y):`` gets called. `See the details in the documentation +about embedding.`__ + +.. __: embedding.html +.. __: embedding.html + + What actually happened? ----------------------- diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -423,6 +423,7 @@ with ``int foo();`` really means ``int foo(void);``.) +.. _extern-python: .. _`extern "Python"`: Extern "Python" (new-style callbacks) @@ -603,6 +604,7 @@ } """) + Extern "Python": reference ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -629,6 +631,8 @@ return a default value. This can be controlled with ``error`` and ``onerror``, described below. +.. _def-extern: + The ``@ffi.def_extern()`` decorator takes these optional arguments: * ``name``: the name of the function as written in the cdef. By default @@ -1066,12 +1070,13 @@ points in time, and using it in a ``with`` statement. +.. _ffi-new_handle: .. _`ffi.new_handle()`: **ffi.new_handle(python_object)**: return a non-NULL cdata of type ``void *`` that contains an opaque reference to ``python_object``. You can pass it around to C functions or store it into C structures. Later, -you can use **ffi.from_handle(p)** to retrive the original +you can use **ffi.from_handle(p)** to retrieve the original ``python_object`` from a value with the same ``void *`` pointer. *Calling ffi.from_handle(p) is invalid and will likely crash if the cdata object returned by new_handle() is not kept alive!* diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,9 +144,10 @@ `Mailing list `_ """, - version='1.4.2', + version='1.4.3', packages=['cffi'] if cpython else [], - package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h']} + package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h', + '_embedding.h']} if cpython else {}, zip_safe=False, diff --git a/testing/cffi0/test_version.py b/testing/cffi0/test_version.py --- a/testing/cffi0/test_version.py +++ b/testing/cffi0/test_version.py @@ -53,3 +53,10 @@ content = open(p).read() #v = BACKEND_VERSIONS.get(v, v) assert (('assert __version__ == "%s"' % v) in content) + +def test_embedding_h(): + parent = os.path.dirname(os.path.dirname(cffi.__file__)) + v = cffi.__version__ + p = os.path.join(parent, 'cffi', '_embedding.h') + content = open(p).read() + assert ('cffi version: %s"' % (v,)) in content diff --git a/testing/cffi1/test_zdist.py b/testing/cffi1/test_zdist.py --- a/testing/cffi1/test_zdist.py +++ b/testing/cffi1/test_zdist.py @@ -59,11 +59,16 @@ if (name.endswith('.so') or name.endswith('.pyd') or name.endswith('.dylib')): found_so = os.path.join(curdir, name) - # foo.cpython-34m.so => foo - name = name.split('.')[0] - # foo_d.so => foo (Python 2 debug builds) + # foo.so => foo + parts = name.split('.') + del parts[-1] + if len(parts) > 1 and parts[-1] != 'bar': + # foo.cpython-34m.so => foo, but foo.bar.so => foo.bar + del parts[-1] + name = '.'.join(parts) + # foo_d => foo (Python 2 debug builds) if name.endswith('_d') and hasattr(sys, 'gettotalrefcount'): - name = name.rsplit('_', 1)[0] + name = name[:-2] name += '.SO' if name.startswith('pycparser') and name.endswith('.egg'): continue # no clue why this shows up sometimes and not others @@ -208,6 +213,58 @@ 'Release': '?'}}) @chdir_to_tmp + def test_api_compile_explicit_target_1(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.*") + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_2(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target=os.path.join("mod_name_in_package", "foo.bar.*")) + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_3(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.baz") + if sys.platform != 'win32': + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + sofile = os.path.join(str(self.udir), 'foo.bar.baz') + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp def test_api_distutils_extension_1(self): ffi = cffi.FFI() ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") diff --git a/testing/embedding/__init__.py b/testing/embedding/__init__.py new file mode 100644 diff --git a/testing/embedding/add1-test.c b/testing/embedding/add1-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/add1-test.c @@ -0,0 +1,13 @@ +#include + +extern int add1(int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add1(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/testing/embedding/add1.py b/testing/embedding/add1.py new file mode 100644 --- /dev/null +++ b/testing/embedding/add1.py @@ -0,0 +1,33 @@ +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + import sys, time + sys.stdout.write("preparing") + for i in range(3): + sys.stdout.flush() + time.sleep(0.02) + sys.stdout.write(".") + sys.stdout.write("\n") + + from _add1_cffi import ffi + + int(ord("A")) # check that built-ins are there + + @ffi.def_extern() + def add1(x, y): + sys.stdout.write("adding %d and %d\n" % (x, y)) + sys.stdout.flush() + return x + y +""") + +ffi.set_source("_add1_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/add2-test.c b/testing/embedding/add2-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/add2-test.c @@ -0,0 +1,14 @@ +#include + +extern int add1(int, int); +extern int add2(int, int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add2(100, -5, -20); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/testing/embedding/add2.py b/testing/embedding/add2.py new file mode 100644 --- /dev/null +++ b/testing/embedding/add2.py @@ -0,0 +1,29 @@ +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add2(int, int, int); +""") + +ffi.embedding_init_code(r""" + import sys + sys.stdout.write("prepADD2\n") + + assert '_add2_cffi' in sys.modules + m = sys.modules['_add2_cffi'] + import _add2_cffi + ffi = _add2_cffi.ffi + + @ffi.def_extern() + def add2(x, y, z): + sys.stdout.write("adding %d and %d and %d\n" % (x, y, z)) + sys.stdout.flush() + return x + y + z +""") + +ffi.set_source("_add2_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/add3.py b/testing/embedding/add3.py new file mode 100644 --- /dev/null +++ b/testing/embedding/add3.py @@ -0,0 +1,24 @@ +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add3(int, int, int, int); +""") + +ffi.embedding_init_code(r""" + from _add3_cffi import ffi + import sys + + @ffi.def_extern() + def add3(x, y, z, t): + sys.stdout.write("adding %d, %d, %d, %d\n" % (x, y, z, t)) + sys.stdout.flush() + return x + y + z + t +""") + +ffi.set_source("_add3_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/testing/embedding/add_recursive-test.c b/testing/embedding/add_recursive-test.c new file mode 100644 --- /dev/null +++ b/testing/embedding/add_recursive-test.c @@ -0,0 +1,27 @@ +#include + +#ifdef _MSC_VER +# define DLLIMPORT __declspec(dllimport) +#else +# define DLLIMPORT extern +#endif + +DLLIMPORT int add_rec(int, int); +DLLIMPORT int (*my_callback)(int); + +static int some_callback(int x) +{ + printf("some_callback(%d)\n", x); + fflush(stdout); + return add_rec(x, 9); +} + +int main(void) +{ + int x, y; + my_callback = some_callback; + x = add_rec(40, 2); + y = add_rec(100, -5); From pypy.commits at gmail.com Fri Jan 15 05:30:27 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 02:30:27 -0800 (PST) Subject: [pypy-commit] cffi default: Bump version number here Message-ID: <5698ca43.85e41c0a.93cdb.ffffa6a3@mx.google.com> Author: Armin Rigo Branch: Changeset: r2586:16566febd6da Date: 2016-01-15 11:29 +0100 http://bitbucket.org/cffi/cffi/changeset/16566febd6da/ Log: Bump version number here diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2,7 +2,7 @@ #include #include "structmember.h" -#define CFFI_VERSION "1.4.2" +#define CFFI_VERSION "1.4.3" #ifdef MS_WIN32 #include From pypy.commits at gmail.com Fri Jan 15 05:46:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 02:46:44 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: implemented stacklet switch (slp_switch) for s390x Message-ID: <5698ce14.ea5ec20a.4bd55.ffffbc4d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81787:9eab6627968e Date: 2016-01-15 11:45 +0100 http://bitbucket.org/pypy/pypy/changeset/9eab6627968e/ Log: implemented stacklet switch (slp_switch) for s390x diff --git a/rpython/translator/c/src/stacklet/slp_platformselect.h b/rpython/translator/c/src/stacklet/slp_platformselect.h --- a/rpython/translator/c/src/stacklet/slp_platformselect.h +++ b/rpython/translator/c/src/stacklet/slp_platformselect.h @@ -14,7 +14,7 @@ #include "switch_ppc64_gcc.h" /* gcc on ppc64 */ #elif defined(__GNUC__) && defined(__mips__) && defined(_ABI64) #include "switch_mips64_gcc.h" /* gcc on mips64 */ -#elif defined(__GNUC__) && defined(__s390x__) && defined(_ABI64) +#elif defined(__GNUC__) && defined(__s390x__) #include "switch_s390x_gcc.h" #else #error "Unsupported platform!" diff --git a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h --- a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h @@ -1,14 +1,3 @@ -#if !(defined(__LITTLE_ENDIAN__) ^ defined(__BIG_ENDIAN__)) -# error "cannot determine if it is ppc64 or ppc64le" -#endif - -#ifdef __BIG_ENDIAN__ -# define TOC_AREA "40" -#else -# define TOC_AREA "24" -#endif - - /* This depends on these attributes so that gcc generates a function with no code before the asm, and only "blr" after. */ static __attribute__((noinline, optimize("O2"))) @@ -36,67 +25,40 @@ "std 4,144(15)\n" "std 6,152(15)\n" - "lay 15,-160(15)\n" /* Create stack frame */ + "lay 15,-160(15)\n" /* Create stack frame */ "lgr 10, %[restore_state]\n" /* save 'restore_state' for later */ "lgr 11, %[extra]\n" /* save 'extra' for later */ "lgr 14, %[save_state]\n" /* move 'save_state' into r14 for branching */ - "mr 2, 15\n" /* arg 1: current (old) stack pointer */ - "mr 3, 11\n" /* arg 2: extra */ + "lgr 2, 15\n" /* arg 1: current (old) stack pointer */ + "lgr 3, 11\n" /* arg 2: extra */ - "stdu 1, -48(1)\n" /* create temp stack space (see below) */ -#ifdef __BIG_ENDIAN__ - "ld 0, 0(12)\n" - "ld 11, 16(12)\n" - "mtctr 0\n" - "ld 2, 8(12)\n" -#else - "mtctr 12\n" /* r12 is fixed by this ABI */ -#endif - "bctrl\n" /* call save_state() */ - "addi 1, 1, 48\n" /* destroy temp stack space */ + "lay 15, -160(15)\n" /* create temp stack space (see below) */ + "basr 14, 14\n" /* call save_state() */ + "lay 15, 160(15)\n" /* destroy temp stack space */ - "CGIJ 2, 0, 7, zero\n" /* skip the rest if the return value is null */ + "cgij 2, 0, 8, zero\n" /* skip the rest if the return value is null */ - "lgr 15, 2\n" /* change the stack pointer */ - /* From now on, the stack pointer is modified, but the content of the + "lgr 15, 2\n" /* change the stack pointer */ + + /* From now on, the stack pointer is modified, but the content of the stack is not restored yet. It contains only garbage here. */ + /* arg 1: current (new) stack pointer + is already in r2 */ + "lgr 3, 11\n" /* arg 2: extra */ - "mr 4, 15\n" /* arg 2: extra */ - /* arg 1: current (new) stack pointer - is already in r3 */ - "stdu 1, -48(1)\n" /* create temp stack space for callee to use */ - /* ^^^ we have to be careful. The function call will store the link - register in the current frame (as the ABI) dictates. But it will - then trample it with the restore! We fix this by creating a fake - stack frame */ - -#ifdef __BIG_ENDIAN__ - "ld 0, 0(14)\n" /* 'restore_state' is in r14 */ - "ld 11, 16(14)\n" - "mtctr 0\n" - "ld 2, 8(14)\n" -#endif -#ifdef __LITTLE_ENDIAN__ - "mr 12, 14\n" /* copy 'restore_state' */ - "mtctr 12\n" /* r12 is fixed by this ABI */ -#endif - - "bctrl\n" /* call restore_state() */ - "addi 1, 1, 48\n" /* destroy temp stack space */ + "lay 15, -160(15)\n" /* create temp stack space for callee to use */ + "lgr 14, 10\n" /* load restore_state */ + "basr 14, 14\n" /* call restore_state() */ + "lay 15, 160(15)\n" /* destroy temp stack space */ /* The stack's content is now restored. */ "zero:\n" /* Epilogue */ - - // "mtcrf 0xff, 12\n" - - // "addi 1,1,528\n" - - "lay 15,160(15)\n" /* restore stack pointer */ + /* no need */ /* restore stack pointer */ "ld 0,128(15)\n" "ld 2,136(15)\n" From pypy.commits at gmail.com Fri Jan 15 05:58:13 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 02:58:13 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fix tests Message-ID: <5698d0c5.2a06c20a.ab727.ffffc74f@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81788:bcf38eb09571 Date: 2016-01-15 11:57 +0100 http://bitbucket.org/pypy/pypy/changeset/bcf38eb09571/ Log: fix tests diff --git a/rpython/rlib/rvmprof/src/rvmprof.c b/rpython/rlib/rvmprof/src/rvmprof.c --- a/rpython/rlib/rvmprof/src/rvmprof.c +++ b/rpython/rlib/rvmprof/src/rvmprof.c @@ -12,6 +12,8 @@ #else # include "common_header.h" +# include "structdef.h" +# include "src/threadlocal.h" # include "rvmprof.h" /*# ifndef VMPROF_ADDR_OF_TRAMPOLINE # error "RPython program using rvmprof, but not calling vmprof_execute_code()" diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -126,12 +126,18 @@ * ************************************************************* */ -#include "src/threadlocal.h" +#ifndef RPYTHON_LL2CTYPES static vmprof_stack_t *get_vmprof_stack(void) { return RPY_THREADLOCALREF_GET(vmprof_tl_stack); } +#else +static vmprof_stack_t *get_vmprof_stack(void) +{ + return 0; +} +#endif static int get_stack_trace(intptr_t *result, int max_depth, intptr_t pc, ucontext_t *ucontext) { diff --git a/rpython/rlib/rvmprof/src/vmprof_stack.h b/rpython/rlib/rvmprof/src/vmprof_stack.h --- a/rpython/rlib/rvmprof/src/vmprof_stack.h +++ b/rpython/rlib/rvmprof/src/vmprof_stack.h @@ -1,3 +1,5 @@ +#ifndef _VMPROF_STACK_H_ +#define _VMPROF_STACK_H_ #include @@ -19,3 +21,5 @@ // to worry too much. There is a potential for squeezing it with bit // patterns into one WORD, but I don't want to care RIGHT NOW, potential // for future optimization potential + +#endif diff --git a/rpython/rlib/rvmprof/test/test_ztranslation.py b/rpython/rlib/rvmprof/test/test_ztranslation.py --- a/rpython/rlib/rvmprof/test/test_ztranslation.py +++ b/rpython/rlib/rvmprof/test/test_ztranslation.py @@ -64,8 +64,14 @@ def test_interpreted(): # takes forever if the Python process is already big... import subprocess - subprocess.check_call([sys.executable, os.path.basename(__file__)], - cwd=(os.path.dirname(__file__) or '.')) + me = os.path.basename(__file__) + if me.endswith('pyc') or me.endswith('pyo'): + me = me[:-1] + env = os.environ.copy() + env['PYTHONPATH'] = '' + subprocess.check_call([sys.executable, me], + cwd=(os.path.dirname(__file__) or '.'), + env=env) def test_compiled(): fn = compile(main, [], gcpolicy="minimark") From pypy.commits at gmail.com Fri Jan 15 06:36:10 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 03:36:10 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: Fix the jit to use thread-locals Message-ID: <5698d9aa.aa5dc20a.6c0f2.ffffcd17@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81789:195b2ba3887b Date: 2016-01-15 12:35 +0100 http://bitbucket.org/pypy/pypy/changeset/195b2ba3887b/ Log: Fix the jit to use thread-locals diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py --- a/rpython/jit/backend/test/test_rvmprof.py +++ b/rpython/jit/backend/test/test_rvmprof.py @@ -1,22 +1,22 @@ - +import py from rpython.rlib import jit from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.rvmprof import _get_vmprof +from rpython.rlib.rvmprof import cintf from rpython.jit.backend.x86.arch import WORD from rpython.jit.codewriter.policy import JitPolicy class BaseRVMProfTest(object): def test_one(self): + py.test.skip("needs thread-locals in the JIT, which is only available " + "after translation") visited = [] def helper(): - stackp = _get_vmprof().cintf.vmprof_address_of_global_stack()[0] - if stackp: + stack = cintf.vmprof_tl_stack.getraw() + if stack: # not during tracing - stack = rffi.cast(rffi.CArrayPtr(lltype.Signed), stackp) - item = rffi.cast(rffi.CArrayPtr(lltype.Signed), stack[1] - WORD)[0] - visited.append(item) + visited.append(stack.c_value) else: visited.append(0) @@ -37,12 +37,13 @@ hooks = Hooks() - stackp = _get_vmprof().cintf.vmprof_address_of_global_stack() - stackp[0] = 0 # make it empty + null = lltype.nullptr(cintf.VMPROFSTACK) + cintf.vmprof_tl_stack.setraw(null) # make it empty self.meta_interp(f, [10], policy=JitPolicy(hooks)) v = set(visited) assert 0 in v v.remove(0) assert len(v) == 1 assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024 - assert stackp[0] == 0 # make sure we didn't leave anything dangling + assert cintf.vmprof_tl_stack.getraw() == null + # ^^^ make sure we didn't leave anything dangling diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -31,7 +31,7 @@ if WORD == 4: # ebp + ebx + esi + edi + 15 extra words = 19 words - FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof + FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof, XXX make more compact! PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float # 'threadlocal_addr' is passed as 2nd argument on the stack, @@ -41,7 +41,7 @@ THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD else: # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19 - FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof + FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof, XXX make more compact! PASS_ON_MY_FRAME = 12 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM # 'threadlocal_addr' is passed as 2nd argument in %esi, diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -40,7 +40,6 @@ from rpython.jit.codewriter import longlong from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import compute_unique_id -from rpython.rlib.rvmprof.rvmprof import _get_vmprof, VMPROF_JITTED_TAG class Assembler386(BaseAssembler, VectorAssemblerMixin): @@ -839,28 +838,49 @@ return frame_depth def _call_header_vmprof(self): - stack = rffi.cast(lltype.Signed, _get_vmprof().cintf.vmprof_address_of_global_stack()) + from rpython.rlib.rvmprof.rvmprof import cintf, VMPROF_JITTED_TAG + + # tloc = address of pypy_threadlocal_s + if IS_X86_32: + # Can't use esi here, its old value is not saved yet. + # But we can use eax and ecx. + self.mc.MOV_rs(edx.value, THREADLOCAL_OFS) + tloc = edx + old = ecx + else: + # The thread-local value is already in esi. + # We should avoid if possible to use ecx or edx because they + # would be used to pass arguments #3 and #4 (even though, so + # far, the assembler only receives two arguments). + tloc = esi + old = r11 + # eax = address in the stack of a 3-words struct vmprof_stack_s self.mc.LEA_rs(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) - # next - self.mc.MOV(ecx, heap(stack)) - self.mc.MOV_mr((eax.value, 0), ecx.value) - # value + # old = current value of vmprof_tl_stack + self.mc.MOV_rm(old.value, (tloc.value, cintf.vmprof_tl_stack.offset)) + # eax->next = old + self.mc.MOV_mr((eax.value, 0), old.value) + # eax->value = my esp self.mc.MOV_mr((eax.value, WORD), esp.value) - # kind + # eax->kind = VMPROF_JITTED_TAG self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG) - self.mc.MOV(heap(stack), eax) + # save in vmprof_tl_stack the new eax + self.mc.MOV_mr((tloc.value, cintf.vmprof_tl_stack.offset), eax.value) def _call_footer_vmprof(self): - stack = rffi.cast(lltype.Signed, _get_vmprof().cintf.vmprof_address_of_global_stack()) - # *stack = stack->next - self.mc.MOV(eax, heap(stack)) - self.mc.MOV_rm(eax.value, (eax.value, 0)) - self.mc.MOV(heap(stack), eax) + from rpython.rlib.rvmprof.rvmprof import cintf + # edx = address of pypy_threadlocal_s + self.mc.MOV_rs(edx.value, THREADLOCAL_OFS) + # eax = (our local vmprof_tl_stack).next + self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD) + # save in vmprof_tl_stack the value eax + self.mc.MOV_mr((edx.value, cintf.vmprof_tl_stack.offset), eax.value) def _call_header(self): self.mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) self.mc.MOV_sr(PASS_ON_MY_FRAME * WORD, ebp.value) - self._call_header_vmprof() + if self.cpu.translate_support_code: + self._call_header_vmprof() # on X86_64, this uses esi if IS_X86_64: self.mc.MOV_sr(THREADLOCAL_OFS, esi.value) self.mc.MOV_rr(ebp.value, edi.value) @@ -894,7 +914,8 @@ def _call_footer(self): # the return value is the jitframe - self._call_footer_vmprof() + if self.cpu.translate_support_code: + self._call_footer_vmprof() self.mc.MOV_rr(eax.value, ebp.value) gcrootmap = self.cpu.gc_ll_descr.gcrootmap diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -57,9 +57,6 @@ [rffi.INT], lltype.Void, compilation_info=eci, _nowrapper=True) - vmprof_address_of_global_stack = rffi.llexternal( - "vmprof_address_of_global_stack", [], rffi.CArrayPtr(lltype.Signed), - compilation_info=eci, _nowrapper=True) return CInterface(locals()) From pypy.commits at gmail.com Fri Jan 15 06:44:08 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 03:44:08 -0800 (PST) Subject: [pypy-commit] pypy default: Be clearer about that Message-ID: <5698db88.247bc20a.2f1f.ffffcf0a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81790:a4551f2871a0 Date: 2016-01-15 12:43 +0100 http://bitbucket.org/pypy/pypy/changeset/a4551f2871a0/ Log: Be clearer about that diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -666,6 +666,7 @@ self.rm._sync_var(op.getarg(1)) return [self.loc(op.getarg(0)), self.fm.loc(op.getarg(1))] else: + assert op.numargs() == 1 return [self.loc(op.getarg(0))] diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3649,6 +3649,8 @@ [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = int_add(i0, 42) i11 = call_assembler_i(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, descr=looptoken) + # NOTE: call_assembler_i() is turned into a single-argument version + # by rewrite.py guard_not_forced()[] finish(i11) ''' From pypy.commits at gmail.com Fri Jan 15 06:59:40 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 03:59:40 -0800 (PST) Subject: [pypy-commit] pypy default: Fix bogus assembler generated in a case that never occurs in practice Message-ID: <5698df2c.d69c1c0a.e95bf.ffffcc2c@mx.google.com> Author: Armin Rigo Branch: Changeset: r81791:664ecc23f550 Date: 2016-01-15 12:58 +0100 http://bitbucket.org/pypy/pypy/changeset/664ecc23f550/ Log: Fix bogus assembler generated in a case that never occurs in practice diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2067,7 +2067,9 @@ if IS_X86_64: tmploc = esi # already the correct place if argloc is tmploc: - self.mc.MOV_rr(esi.value, edi.value) + # this case is theoretical only so far: in practice, + # argloc is always eax, never esi + self.mc.MOV_rr(edi.value, esi.value) argloc = edi else: tmploc = eax From pypy.commits at gmail.com Fri Jan 15 08:04:47 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 05:04:47 -0800 (PST) Subject: [pypy-commit] cffi default: bump version number to 1.5.0 Message-ID: <5698ee6f.88d31c0a.3ad3.ffffe293@mx.google.com> Author: Armin Rigo Branch: Changeset: r2587:6a062c09a25b Date: 2016-01-15 14:04 +0100 http://bitbucket.org/cffi/cffi/changeset/6a062c09a25b/ Log: bump version number to 1.5.0 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2,7 +2,7 @@ #include #include "structmember.h" -#define CFFI_VERSION "1.4.3" +#define CFFI_VERSION "1.5.0" #ifdef MS_WIN32 #include diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.3", ("This test_c.py file is for testing a version" +assert __version__ == "1.5.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.3" -__version_info__ = (1, 4, 3) +__version__ = "1.5.0" +__version_info__ = (1, 5, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.4.3" + "\ncompiled with cffi version: 1.5.0" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.4' +version = '1.5' # The full version, including alpha/beta/rc tags. -release = '1.4.3' +release = '1.5.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,7 +51,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.3.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.5.0.tar.gz - MD5: ... diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.4.3', + version='1.5.0', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h', '_embedding.h']} From pypy.commits at gmail.com Fri Jan 15 08:10:21 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 05:10:21 -0800 (PST) Subject: [pypy-commit] cffi default: What's New Message-ID: <5698efbd.cb571c0a.a54d.ffffe0c9@mx.google.com> Author: Armin Rigo Branch: Changeset: r2588:4ce79f707838 Date: 2016-01-15 14:10 +0100 http://bitbucket.org/cffi/cffi/changeset/4ce79f707838/ Log: What's New diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,14 @@ ====================== +v1.5.0 +====== + +* Support for `using CFFI for embedding`__. + +.. __: embedding.html + + v1.4.2 ====== From pypy.commits at gmail.com Fri Jan 15 08:16:09 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 05:16:09 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: update to cffi/4ce79f707838 Message-ID: <5698f119.4f911c0a.6beed.ffffe839@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81792:e5825665a3ae Date: 2016-01-15 14:15 +0100 http://bitbucket.org/pypy/pypy/changeset/e5825665a3ae/ Log: update to cffi/4ce79f707838 diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.3 +Version: 1.5.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.3" -__version_info__ = (1, 4, 3) +__version__ = "1.5.0" +__version_info__ = (1, 5, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -74,7 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None - self._embedding_init_code = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -94,7 +94,7 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False, packed=False, dllexport=False): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. @@ -102,14 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed, - dllexport=dllexport) + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -535,6 +542,25 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + if '__pypy__' in sys.builtin_module_names: + pythonlib = "pypy-c" + else: + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + libraries = kwds.get('libraries', []) + if pythonlib not in libraries: + kwds['libraries'] = libraries + [pythonlib] + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " @@ -594,14 +620,23 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0): + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, + target=target, source_extension=source_extension, compiler_verbose=verbose, **kwds) def init_once(self, func, tag): @@ -629,17 +664,30 @@ return result def embedding_init_code(self, pysource): - if self._embedding_init_code is not None: + if self._embedding: raise ValueError("embedding_init_code() can only be called once") - # check for SyntaxErrors, at least, and automatically add a - # "if 1:" line in front of the code if the whole pysource is - # indented - try: - compile(pysource, "cffi_init", "exec") - except IndentationError: - pysource = 'if 1:\n' + pysource - compile(pysource, "cffi_init", "exec") - self._embedding_init_code = pysource + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource def _load_backend_lib(backend, name, flags): diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -374,11 +374,10 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._inside_extern_python: - if self._options['dllexport']: - tag = 'dllexport_python ' - else: - tag = 'extern_python ' + if self._options['dllexport']: + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' else: tag = 'function ' self._declare(tag + decl.name, tp) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, + target_extension, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +38,32 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _save_val(name): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + return config_vars.get(name, Ellipsis) + +def _restore_val(name, value): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + config_vars[name] = value + if value is Ellipsis: + del config_vars[name] + +def _win32_hack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): + MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ + MSVCCompiler._remove_visual_c_ref + MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file + +def _win32_unhack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + MSVCCompiler._remove_visual_c_ref = \ + MSVCCompiler._remove_visual_c_ref_CFFI_BAK + +def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -49,18 +76,29 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: + if sys.platform == 'win32' and embedding: + _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 + old_SO = _save_val('SO') + old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extension is not None: + _restore_val('SO', target_extension) + _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) + _restore_val('SO', old_SO) + _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) + if sys.platform == 'win32' and embedding: + _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -282,13 +282,13 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # - # if we have ffi._embedding_init_code, we give it here as a macro + # if we have ffi._embedding != None, we give it here as a macro # and include an extra file base_module_name = self.module_name.split('.')[-1] - if self.ffi._embedding_init_code is not None: + if self.ffi._embedding is not None: prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % - (self._string_literal(self.ffi._embedding_init_code),)) + (self._string_literal(self.ffi._embedding),)) prnt('#ifdef PYPY_VERSION') prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( base_module_name,)) @@ -1359,12 +1359,15 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, **kwds): + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) @@ -1373,13 +1376,40 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + if target == '*': + target_module_name = module_name + target_extension = None # use default + else: + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + # split along the first '.' (not the last one, otherwise the + # preceeding dots are interpreted as splitting package names) + index = target.find('.') + if index < 0: + raise ValueError("target argument %r should be a file name " + "containing a '.'" % (target,)) + target_module_name = target[:index] + target_extension = target[index:] + # + ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose, + target_extension, + embedding=embedding) finally: os.chdir(cwd) return outputfilename diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.4.3" +VERSION = "1.5.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.3", ("This test_c.py file is for testing a version" +assert __version__ == "1.5.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py @@ -1719,3 +1719,10 @@ exec("from _test_import_from_lib.lib import *", d) assert (set(key for key in d if not key.startswith('_')) == set(['myfunc', 'MYFOO'])) + # + # also test "import *" on the module itself, which should be + # equivalent to "import ffi, lib" + d = {} + exec("from _test_import_from_lib import *", d) + assert (sorted([x for x in d.keys() if not x.startswith('__')]) == + ['ffi', 'lib']) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py @@ -60,11 +60,16 @@ if (name.endswith('.so') or name.endswith('.pyd') or name.endswith('.dylib')): found_so = os.path.join(curdir, name) - # foo.cpython-34m.so => foo - name = name.split('.')[0] - # foo_d.so => foo (Python 2 debug builds) + # foo.so => foo + parts = name.split('.') + del parts[-1] + if len(parts) > 1 and parts[-1] != 'bar': + # foo.cpython-34m.so => foo, but foo.bar.so => foo.bar + del parts[-1] + name = '.'.join(parts) + # foo_d => foo (Python 2 debug builds) if name.endswith('_d') and hasattr(sys, 'gettotalrefcount'): - name = name.rsplit('_', 1)[0] + name = name[:-2] name += '.SO' if name.startswith('pycparser') and name.endswith('.egg'): continue # no clue why this shows up sometimes and not others @@ -209,6 +214,58 @@ 'Release': '?'}}) @chdir_to_tmp + def test_api_compile_explicit_target_1(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.*") + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_2(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target=os.path.join("mod_name_in_package", "foo.bar.*")) + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_3(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.baz") + if sys.platform != 'win32': + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + sofile = os.path.join(str(self.udir), 'foo.bar.baz') + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp def test_api_distutils_extension_1(self): ffi = cffi.FFI() ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") From pypy.commits at gmail.com Fri Jan 15 09:19:08 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 06:19:08 -0800 (PST) Subject: [pypy-commit] cffi default: Delay changing this to 1.6 (1.5 is very soon after 1.4) Message-ID: <5698ffdc.6217c20a.f13ef.0a5f@mx.google.com> Author: Armin Rigo Branch: Changeset: r2589:abd64e2e97cb Date: 2016-01-15 15:18 +0100 http://bitbucket.org/cffi/cffi/changeset/abd64e2e97cb/ Log: Delay changing this to 1.6 (1.5 is very soon after 1.4) diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1352,8 +1352,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 5): - py.test.skip("re-enable me in version 1.5") + if __version_info__ < (1, 6): + py.test.skip("re-enable me in version 1.6") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " From pypy.commits at gmail.com Fri Jan 15 09:33:49 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Jan 2016 06:33:49 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: avoid segfault by detecting the problem in c, move the python-level test to fail first Message-ID: <5699034d.17941c0a.e7f46.0074@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81793:c830075b9c39 Date: 2016-01-11 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/c830075b9c39/ Log: avoid segfault by detecting the problem in c, move the python-level test to fail first diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -633,9 +633,9 @@ static char *msg = "already has a docstring"; PyObject *tp_dict = footype.tp_dict; PyObject *myobj; - static PyTypeObject *PyMemberDescr_TypePtr = NULL; - static PyTypeObject *PyGetSetDescr_TypePtr = NULL; - static PyTypeObject *PyMethodDescr_TypePtr = NULL; + static PyTypeObject *PyMemberDescr_TypePtr = NULL; /* a PyMemberDescr_Type* */ + static PyTypeObject *PyGetSetDescr_TypePtr = NULL; /* a PyGetSetDescr_Type* */ + static PyTypeObject *PyMethodDescr_TypePtr = NULL; /* a PyClassMethodDescr_Type* */ /* Don't add docstrings */ if (Py_OptimizeFlag > 1) { @@ -661,7 +661,15 @@ PyMethodDescr_TypePtr = Py_TYPE(myobj); } } - + if (PyMethodDescr_TypePtr == PyMemberDescr_TypePtr || + PyMethodDescr_TypePtr == PyGetSetDescr_TypePtr || + PyMemberDescr_TypePtr == PyGetSetDescr_TypePtr) + { + PyErr_Format(PyExc_RuntimeError, + "at least two of the 'Py{Method,Member,GetSet}Descr_Type's are the same\n" + "(in add_docstring %s %d)", __FILE__, __LINE__); + return NULL; + } if (!PyArg_ParseTuple(args, "OO!", &obj, &PyString_Type, &str)) { return NULL; } @@ -686,6 +694,7 @@ _ADDDOC(Type, new->tp_doc, new->tp_name); } else if (_TESTDOC2(MemberDescr)) { + /* docless_member ends up here */ _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name); } else if (_TESTDOC2(GetSetDescr)) { diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -55,9 +55,9 @@ raises(SystemError, "obj.broken_member = 42") assert module.fooType.broken_member.__doc__ is None assert module.fooType.object_member.__doc__ == "A Python object." + assert str(type(module.fooType.int_member)) == "" module.add_docstring(module.fooType.docless_member, "docstring for docless_member") assert module.fooType.docless_member.__doc__ == "docstring for docless_member" - assert str(type(module.fooType.int_member)) == "" def test_typeobject_object_member(self): module = self.import_module(name='foo') From pypy.commits at gmail.com Fri Jan 15 09:33:50 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Jan 2016 06:33:50 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: make the header compatible with cpython Message-ID: <5699034e.cf821c0a.68ca1.034e@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81794:0309d3b5553a Date: 2016-01-15 15:41 +0200 http://bitbucket.org/pypy/pypy/changeset/0309d3b5553a/ Log: make the header compatible with cpython diff --git a/pypy/module/cpyext/include/descrobject.h b/pypy/module/cpyext/include/descrobject.h --- a/pypy/module/cpyext/include/descrobject.h +++ b/pypy/module/cpyext/include/descrobject.h @@ -12,13 +12,13 @@ } PyGetSetDef; -#define PyDescr_COMMON PyDescrObject d_common +#define PyDescr_COMMON \ + PyObject_HEAD \ + PyTypeObject *d_type; \ + PyObject *d_name typedef struct { - PyObject_HEAD - PyTypeObject *d_type; - PyObject *d_name; - PyObject *d_qualname; + PyDescr_COMMON; } PyDescrObject; typedef struct { @@ -28,12 +28,18 @@ typedef struct { PyDescr_COMMON; + struct PyMemberDef *d_member; +} PyMemberDescrObject; + +typedef struct { + PyDescr_COMMON; PyGetSetDef *d_getset; } PyGetSetDescrObject; typedef struct { PyDescr_COMMON; - struct PyMemberDef *d_member; -} PyMemberDescrObject; + struct wrapperbase *d_base; + void *d_wrapped; /* This can be any function pointer */ +} PyWrapperDescrObject; #endif From pypy.commits at gmail.com Fri Jan 15 09:33:52 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Jan 2016 06:33:52 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: export only what needs to be exported Message-ID: <56990350.6953c20a.5d168.0e9f@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81795:e7a618d391f3 Date: 2016-01-15 15:42 +0200 http://bitbucket.org/pypy/pypy/changeset/e7a618d391f3/ Log: export only what needs to be exported diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,7 +4,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, + mangle_name) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -364,6 +365,7 @@ def build_slot_tp_function(space, typedef, name): w_type = space.gettypeobject(typedef) + external = mangle_name('', typedef.name) is not None if name == 'tp_setattro': setattr_fn = w_type.getdictvalue(space, '__setattr__') delattr_fn = w_type.getdictvalue(space, '__delattr__') @@ -371,7 +373,7 @@ return @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, - error=-1, external=True) # XXX should not be exported + error=-1, external=external) @func_renamer("cpyext_tp_setattro_%s" % (typedef.name,)) def slot_tp_setattro(space, w_self, w_name, w_value): if w_value is not None: @@ -386,7 +388,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - external=True) + external=external) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) From pypy.commits at gmail.com Fri Jan 15 09:33:54 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Jan 2016 06:33:54 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: add a TypeDef to W_MemberDesc with the corrent name Message-ID: <56990352.8673c20a.08fa.1034@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81796:aced3aac84e6 Date: 2016-01-15 15:45 +0200 http://bitbucket.org/pypy/pypy/changeset/aced3aac84e6/ Log: add a TypeDef to W_MemberDesc with the corrent name diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -8,7 +8,8 @@ from pypy.interpreter.baseobjspace import W_Root, DescrMismatch from pypy.interpreter.error import OperationError -from pypy.interpreter.typedef import GetSetProperty +from pypy.interpreter.typedef import (GetSetProperty, TypeDef, + interp_attrproperty, interp_attrproperty, interp2app) from pypy.module.__builtin__.abstractinst import abstract_issubclass_w from pypy.module.cpyext import structmemberdefs from pypy.module.cpyext.api import ( @@ -69,6 +70,7 @@ return space.wrap(W_GetSetPropertyEx(getset, w_type)) class W_MemberDescr(GetSetProperty): + name = 'member_descriptor' def __init__(self, member, w_type): self.member = member self.name = rffi.charp2str(member.c_name) @@ -85,6 +87,18 @@ cls=None, use_closure=True, tag="cpyext_2") +# change the typedef name +W_MemberDescr.typedef = TypeDef( + "member_descriptor", + __get__ = interp2app(GetSetProperty.descr_property_get), + __set__ = interp2app(GetSetProperty.descr_property_set), + __delete__ = interp2app(GetSetProperty.descr_property_del), + __name__ = interp_attrproperty('name', cls=GetSetProperty), + __objclass__ = GetSetProperty(GetSetProperty.descr_get_objclass), + __doc__ = interp_attrproperty('doc', cls=GetSetProperty), + ) +assert not W_MemberDescr.typedef.acceptable_as_base_class # no __new__ + def convert_getset_defs(space, dict_w, getsets, w_type): getsets = rffi.cast(rffi.CArrayPtr(PyGetSetDef), getsets) if getsets: From pypy.commits at gmail.com Fri Jan 15 09:33:58 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Jan 2016 06:33:58 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: rewrite the test, do not modify a readonly attribute; stcmp() instead. Exempt checking for getset, type objects Message-ID: <56990356.84e31c0a.70bdc.000c@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81798:007d50e8afde Date: 2016-01-15 15:48 +0200 http://bitbucket.org/pypy/pypy/changeset/007d50e8afde/ Log: rewrite the test, do not modify a readonly attribute; stcmp() instead. Exempt checking for getset, type objects diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -20,7 +20,6 @@ long long foo_longlong; unsigned long long foo_ulonglong; Py_ssize_t foo_ssizet; - PyObject * foo_docless; } fooobject; static PyTypeObject footype; @@ -185,7 +184,6 @@ {"longlong_member", T_LONGLONG, offsetof(fooobject, foo_longlong), 0, NULL}, {"ulonglong_member", T_ULONGLONG, offsetof(fooobject, foo_ulonglong), 0, NULL}, {"ssizet_member", T_PYSSIZET, offsetof(fooobject, foo_ssizet), 0, NULL}, - {"docless_member", T_OBJECT, offsetof(fooobject, foo_docless), READONLY, NULL}, {NULL} /* Sentinel */ }; @@ -625,25 +623,19 @@ (destructor)custom_dealloc, /*tp_dealloc*/ }; -static PyObject * add_docstring(PyObject * self, PyObject * args) +static PyObject * cmp_docstring(PyObject * self, PyObject * args) { PyObject *obj; PyObject *str; - char *docstr; - static char *msg = "already has a docstring"; + char *docstr, *attr_as_str; + static char *msg = "has no docstring"; PyObject *tp_dict = footype.tp_dict; PyObject *myobj; static PyTypeObject *PyMemberDescr_TypePtr = NULL; /* a PyMemberDescr_Type* */ static PyTypeObject *PyGetSetDescr_TypePtr = NULL; /* a PyGetSetDescr_Type* */ static PyTypeObject *PyMethodDescr_TypePtr = NULL; /* a PyClassMethodDescr_Type* */ - /* Don't add docstrings */ - if (Py_OptimizeFlag > 1) { - Py_RETURN_NONE; - } - if (PyGetSetDescr_TypePtr == NULL) { - /* Get "subdescr" */ myobj = PyDict_GetItemString(tp_dict, "name"); if (myobj != NULL) { PyGetSetDescr_TypePtr = Py_TYPE(myobj); @@ -661,63 +653,72 @@ PyMethodDescr_TypePtr = Py_TYPE(myobj); } } + if (!PyArg_ParseTuple(args, "OO!", &obj, &PyString_Type, &str)) { + return NULL; + } if (PyMethodDescr_TypePtr == PyMemberDescr_TypePtr || PyMethodDescr_TypePtr == PyGetSetDescr_TypePtr || PyMemberDescr_TypePtr == PyGetSetDescr_TypePtr) { PyErr_Format(PyExc_RuntimeError, "at least two of the 'Py{Method,Member,GetSet}Descr_Type's are the same\n" - "(in add_docstring %s %d)", __FILE__, __LINE__); - return NULL; - } - if (!PyArg_ParseTuple(args, "OO!", &obj, &PyString_Type, &str)) { + "(in cmp_docstring %s %d)", __FILE__, __LINE__); return NULL; } docstr = PyString_AS_STRING(str); #define _TESTDOC1(typebase) (Py_TYPE(obj) == &Py##typebase##_Type) #define _TESTDOC2(typebase) (Py_TYPE(obj) == Py##typebase##_TypePtr) -#define _ADDDOC(typebase, doc, name) do { \ +#define _CMPDOC(typebase, doc, name) do { \ Py##typebase##Object *new = (Py##typebase##Object *)obj; \ if (!(doc)) { \ - doc = docstr; \ + PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \ + return NULL; \ } \ else { \ - PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \ - return NULL; \ + if (strcmp(doc, docstr) != 0) \ + { \ + PyErr_Format(PyExc_RuntimeError, \ + "%s method's docstring '%s' is not '%s'", \ + name, doc, docstr); \ + return NULL; \ + } \ } \ } while (0) if (_TESTDOC1(CFunction)) { - _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name); + _CMPDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name); } else if (_TESTDOC1(Type)) { - _ADDDOC(Type, new->tp_doc, new->tp_name); + _CMPDOC(Type, new->tp_doc, new->tp_name); } else if (_TESTDOC2(MemberDescr)) { - /* docless_member ends up here */ - _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name); + _CMPDOC(MemberDescr, new->d_member->doc, new->d_member->name); } else if (_TESTDOC2(GetSetDescr)) { - _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); + //_CMPDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); } else if (_TESTDOC2(MethodDescr)) { - _ADDDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); + _CMPDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); } else { PyObject *doc_attr; doc_attr = PyObject_GetAttrString(obj, "__doc__"); - if (doc_attr != NULL && doc_attr != Py_None) { + if (doc_attr == NULL || doc_attr == Py_None) { PyErr_Format(PyExc_RuntimeError, "object %s", msg); return NULL; } + + attr_as_str = PyString_AS_STRING(doc_attr); + if (strcmp(attr_as_str, docstr) != 0) + { + PyErr_Format(PyExc_RuntimeError, + "objects's docstring '%s' is not '%s'", + attr_as_str, docstr); + Py_XDECREF(doc_attr); + return NULL; + } Py_XDECREF(doc_attr); - - if (PyObject_SetAttrString(obj, "__doc__", str) < 0) { - PyErr_SetString(PyExc_TypeError, - "Cannot set a docstring for that object"); - return NULL; - } Py_RETURN_NONE; } @@ -725,7 +726,6 @@ #undef _TESTDOC2 #undef _ADDDOC - Py_INCREF(str); Py_RETURN_NONE; } @@ -734,7 +734,7 @@ static PyMethodDef foo_functions[] = { {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, {"newCustom", (PyCFunction)newCustom, METH_NOARGS, NULL}, - {"add_docstring", (PyCFunction)add_docstring, METH_VARARGS, NULL}, + {"cmp_docstring", (PyCFunction)cmp_docstring, METH_VARARGS, NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -55,9 +55,21 @@ raises(SystemError, "obj.broken_member = 42") assert module.fooType.broken_member.__doc__ is None assert module.fooType.object_member.__doc__ == "A Python object." + for m in dir(module.fooType): + obj = getattr(module.fooType, m) + if 'getset' in str(type(obj)): + # segfaults + continue + if 'type' in str(type(obj)): + # leaks a None reference + continue + docstring = obj.__doc__ + if not docstring: + raises(RuntimeError, module.cmp_docstring, obj, 'random') + else: + import pdb;pdb.set_trace() + module.cmp_docstring(obj, docstring) assert str(type(module.fooType.int_member)) == "" - module.add_docstring(module.fooType.docless_member, "docstring for docless_member") - assert module.fooType.docless_member.__doc__ == "docstring for docless_member" def test_typeobject_object_member(self): module = self.import_module(name='foo') From pypy.commits at gmail.com Fri Jan 15 09:33:59 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Jan 2016 06:33:59 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: make test more complete, now fails Message-ID: <56990357.0c2e1c0a.b5143.01fc@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81799:07e8b87a262b Date: 2016-01-15 16:06 +0200 http://bitbucket.org/pypy/pypy/changeset/07e8b87a262b/ Log: make test more complete, now fails diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -57,12 +57,6 @@ assert module.fooType.object_member.__doc__ == "A Python object." for m in dir(module.fooType): obj = getattr(module.fooType, m) - if 'getset' in str(type(obj)): - # segfaults - continue - if 'type' in str(type(obj)): - # leaks a None reference - continue docstring = obj.__doc__ if not docstring: raises(RuntimeError, module.cmp_docstring, obj, 'random') From pypy.commits at gmail.com Fri Jan 15 09:33:56 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Jan 2016 06:33:56 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: add a make_typedescr for W_MemeberDesc Message-ID: <56990354.2179c20a.9de87.0c32@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81797:da2c16ceb152 Date: 2016-01-15 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/da2c16ceb152/ Log: add a make_typedescr for W_MemeberDesc diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -16,7 +16,8 @@ cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers) + Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers, PyObjectFields, + PyObject) from pypy.module.cpyext.methodobject import ( PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) from pypy.module.cpyext.modsupport import convert_method_defs @@ -99,6 +100,51 @@ ) assert not W_MemberDescr.typedef.acceptable_as_base_class # no __new__ +PyDescrObject = lltype.ForwardReference() +PyDescrObjectPtr = lltype.Ptr(PyDescrObject) +PyDescrObjectFields = PyObjectFields + ( + ("d_type", PyTypeObjectPtr), + ("d_name", PyObject), + ) +cpython_struct("PyDescrObject", PyDescrObjectFields, + PyDescrObject) + +PyMemberDescrObjectStruct = lltype.ForwardReference() +PyMemberDescrObject = lltype.Ptr(PyMemberDescrObjectStruct) +PyMemberDescrObjectFields = PyDescrObjectFields + ( + ("d_member", lltype.Ptr(PyMemberDef)), + ) +cpython_struct("PyMemberDescrObject", PyMemberDescrObjectFields, + PyMemberDescrObjectStruct, level=2) + + at bootstrap_function +def init_memberdescrobject(space): + make_typedescr(W_MemberDescr.typedef, + basestruct=PyMemberDescrObject.TO, + attach=memberdescr_attach, + realize=memberdescr_realize, + ) + +def memberdescr_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyMemberDescrObject with the given W_MemberDescr + object. The values must not be modified. + """ + py_memberdescr = rffi.cast(PyMemberDescrObject, py_obj) + # XXX assign to d_dname, d_type? + py_memberdescr.c_d_member = w_obj.member + +def memberdescr_realize(space, obj): + # XXX NOT TESTED When is this ever called? + member = rffi.cast(PyMemberDef, obj) + w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) + w_obj = space.allocate_instance(W_MemberDescr, w_type) + w_obj.__init__(member, w_type) + track_reference(space, obj, w_obj) + state = space.fromcache(RefcountState) + state.set_lifeline(w_obj, obj) + return w_obj + def convert_getset_defs(space, dict_w, getsets, w_type): getsets = rffi.cast(rffi.CArrayPtr(PyGetSetDef), getsets) if getsets: @@ -373,7 +419,6 @@ track_reference(space, py_tuple, space.w_tuple, replace=True) track_reference(space, py_str, space.w_str, replace=True) - @cpython_api([PyObject], lltype.Void, external=False) def subtype_dealloc(space, obj): pto = obj.c_ob_type From pypy.commits at gmail.com Fri Jan 15 09:34:01 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 15 Jan 2016 06:34:01 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: fix translation Message-ID: <56990359.6918c20a.a5769.096c@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81800:efd106c73dda Date: 2016-01-15 16:32 +0200 http://bitbucket.org/pypy/pypy/changeset/efd106c73dda/ Log: fix translation diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -132,11 +132,12 @@ """ py_memberdescr = rffi.cast(PyMemberDescrObject, py_obj) # XXX assign to d_dname, d_type? + assert isinstance(w_obj, W_MemberDescr) py_memberdescr.c_d_member = w_obj.member def memberdescr_realize(space, obj): # XXX NOT TESTED When is this ever called? - member = rffi.cast(PyMemberDef, obj) + member = rffi.cast(lltype.Ptr(PyMemberDef), obj) w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) w_obj = space.allocate_instance(W_MemberDescr, w_type) w_obj.__init__(member, w_type) From pypy.commits at gmail.com Fri Jan 15 10:01:10 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 07:01:10 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: hg merge default Message-ID: <569909b6.44e21c0a.53a83.0f71@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81801:96823f281a0c Date: 2016-01-15 13:25 +0000 http://bitbucket.org/pypy/pypy/changeset/96823f281a0c/ Log: hg merge default diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -39,5 +39,5 @@ # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html -cffi_imports: +cffi_imports: pypy-c PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -108,4 +108,5 @@ Optimize the case where, in a new C-created thread, we keep invoking short-running Python callbacks. (CFFI on CPython has a hack to achieve -the same result.) +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -372,7 +372,7 @@ def arg_int_w(self, w_obj, minimum, errormsg): space = self.space try: - result = space.int_w(w_obj) + result = space.int_w(space.int(w_obj)) # CPython allows floats as parameters except OperationError, e: if e.async(space): raise diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -225,6 +225,12 @@ assert it.next() == x raises(StopIteration, it.next) + # CPython implementation allows floats + it = itertools.islice([1, 2, 3, 4, 5], 0.0, 3.0, 2.0) + for x in [1, 3]: + assert it.next() == x + raises(StopIteration, it.next) + it = itertools.islice([1, 2, 3], 0, None) for x in [1, 2, 3]: assert it.next() == x diff --git a/pypy/module/test_lib_pypy/test_cPickle.py b/pypy/module/test_lib_pypy/test_cPickle.py --- a/pypy/module/test_lib_pypy/test_cPickle.py +++ b/pypy/module/test_lib_pypy/test_cPickle.py @@ -5,3 +5,7 @@ def test_stack_underflow(): py.test.raises(cPickle.UnpicklingError, cPickle.loads, "a string") + +def test_bad_key(): + e = py.test.raises(cPickle.UnpicklingError, cPickle.loads, "v") + assert str(e.value) == "invalid load key, 'v'." diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -722,7 +722,8 @@ def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): if bytes != 8: raise Exception("gc_load_indexed_f is only for 'double'!") - return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale, base_ofs) + return llop.gc_load_indexed(longlong.FLOATSTORAGE, + struct, index, scale, base_ofs) def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -330,15 +330,11 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - if funcobj.random_effects_on_gcobjs: - return True - except (AttributeError, lltype.DelayedPointer): - return True # better safe than sorry + def analyze_external_call(self, funcobj, seen=None): + if funcobj.random_effects_on_gcobjs: + return True return super(RandomEffectsAnalyzer, self).analyze_external_call( - op, seen) + funcobj, seen) def analyze_simple_operation(self, op, graphinfo): return False diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -399,9 +399,7 @@ rop.GC_LOAD_I, rop.GC_LOAD_R, rop.GC_LOAD_F, - rop.GC_LOAD_INDEXED_I, rop.GC_LOAD_INDEXED_R, - rop.GC_LOAD_INDEXED_F, rop.GC_STORE, rop.GC_STORE_INDEXED, ): # list of opcodes never executed by pyjitpl diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -21,7 +21,10 @@ pass -class CachedField(object): +class AbstractCachedEntry(object): + """ abstract base class abstracting over the difference between caching + struct fields and array items. """ + def __init__(self): # Cache information for a field descr, or for an (array descr, index) # pair. It can be in one of two states: @@ -29,8 +32,8 @@ # 1. 'cached_infos' is a list listing all the infos that are # caching this descr # - # 2. we just did one setfield, which is delayed (and thus - # not synchronized). 'lazy_setfield' is the delayed + # 2. we just did one set(field/arrayitem), which is delayed (and thus + # not synchronized). '_lazy_set' is the delayed # ResOperation. In this state, 'cached_infos' contains # out-of-date information. More precisely, the field # value pending in the ResOperation is *not* visible in @@ -38,43 +41,39 @@ # self.cached_infos = [] self.cached_structs = [] - self._lazy_setfield = None - self._lazy_setfield_registered = False + self._lazy_set = None - def register_dirty_field(self, structop, info): + def register_info(self, structop, info): + # invariant: every struct or array ptr info, that is not virtual and + # that has a non-None entry at + # info._fields[descr.get_index()] + # must be in cache_infos self.cached_structs.append(structop) self.cached_infos.append(info) - def invalidate(self, descr): - for opinfo in self.cached_infos: - assert isinstance(opinfo, info.AbstractStructPtrInfo) - opinfo._fields[descr.get_index()] = None - self.cached_infos = [] - self.cached_structs = [] - def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): - assert self._lazy_setfield is None + assert self._lazy_set is None for i, info in enumerate(self.cached_infos): structbox = optimizer.get_box_replacement(self.cached_structs[i]) info.produce_short_preamble_ops(structbox, descr, index, optimizer, shortboxes) def possible_aliasing(self, optheap, opinfo): - # If lazy_setfield is set and contains a setfield on a different + # If lazy_set is set and contains a setfield on a different # structvalue, then we are annoyed, because it may point to either # the same or a different structure at runtime. # XXX constants? - return (self._lazy_setfield is not None + return (self._lazy_set is not None and (not optheap.getptrinfo( - self._lazy_setfield.getarg(0)).same_info(opinfo))) + self._lazy_set.getarg(0)).same_info(opinfo))) def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. structinfo = optheap.ensure_ptr_info_arg0(op) - arg1 = optheap.get_box_replacement(self._getvalue(op)) + arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): - self.force_lazy_setfield(optheap, op.getdescr()) + self.force_lazy_set(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) cached_field = self._getfield(structinfo, op.getdescr(), optheap, False) if cached_field is not None: @@ -87,58 +86,43 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield, and register - # myself in the optheap's _lazy_setfields_and_arrayitems list - self._lazy_setfield = op - #if not self._lazy_setfield_registered: - # self._lazy_setfield_registered = True + # common case: store the 'op' as lazy_set + self._lazy_set = op else: # this is the case where the pending setfield ends up # storing precisely the value that is already there, # as proved by 'cached_fields'. In this case, we don't - # need any _lazy_setfield: the heap value is already right. - # Note that this may reset to None a non-None lazy_setfield, + # need any _lazy_set: the heap value is already right. + # Note that this may reset to None a non-None lazy_set, # cancelling its previous effects with no side effect. # Now, we have to force the item in the short preamble self._getfield(structinfo, op.getdescr(), optheap) - self._lazy_setfield = None + self._lazy_set = None def getfield_from_cache(self, optheap, opinfo, descr): # Returns the up-to-date field's value, or None if not cached. if self.possible_aliasing(optheap, opinfo): - self.force_lazy_setfield(optheap, descr) - if self._lazy_setfield is not None: - op = self._lazy_setfield - return optheap.get_box_replacement(self._getvalue(op)) + self.force_lazy_set(optheap, descr) + if self._lazy_set is not None: + op = self._lazy_set + return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) if res is not None: return res.get_box_replacement() return None - def _getvalue(self, op): - return op.getarg(1) - - def _getfield(self, opinfo, descr, optheap, true_force=True): - res = opinfo.getfield(descr, optheap) - if isinstance(res, PreambleOp): - if not true_force: - return res.op - res = optheap.optimizer.force_op_from_preamble(res) - opinfo.setfield(descr, None, res, optheap) - return res - - def force_lazy_setfield(self, optheap, descr, can_cache=True): - op = self._lazy_setfield + def force_lazy_set(self, optheap, descr, can_cache=True): + op = self._lazy_set if op is not None: - # This is the way _lazy_setfield is usually reset to None. + # This is the way _lazy_set is usually reset to None. # Now we clear _cached_fields, because actually doing the # setfield might impact any of the stored result (because of # possible aliasing). self.invalidate(descr) - self._lazy_setfield = None + self._lazy_set = None if optheap.postponed_op: for a in op.getarglist(): if a is optheap.postponed_op: @@ -151,25 +135,74 @@ # back in the cache: the value of this particular structure's # field. opinfo = optheap.ensure_ptr_info_arg0(op) - self._setfield(op, opinfo, optheap) + self.put_field_back_to_info(op, opinfo, optheap) elif not can_cache: self.invalidate(descr) - def _setfield(self, op, opinfo, optheap): + + # abstract methods + + def _get_rhs_from_set_op(self, op): + """ given a set(field or arrayitem) op, return the rhs argument """ + raise NotImplementedError("abstract method") + + def put_field_back_to_info(self, op, opinfo, optheap): + """ this method is called just after a lazy setfield was ommitted. it + puts the information of the lazy setfield back into the proper cache in + the info. """ + raise NotImplementedError("abstract method") + + def _getfield(self, opinfo, descr, optheap, true_force=True): + raise NotImplementedError("abstract method") + + def invalidate(self, descr): + """ clear all the cached knowledge in the infos in self.cached_infos. + """ + raise NotImplementedError("abstract method") + + +class CachedField(AbstractCachedEntry): + def _get_rhs_from_set_op(self, op): + return op.getarg(1) + + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(1)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setfield(op.getdescr(), struct, arg, optheap, self) + opinfo.setfield(op.getdescr(), struct, arg, optheap=optheap, cf=self) -class ArrayCachedField(CachedField): + def _getfield(self, opinfo, descr, optheap, true_force=True): + res = opinfo.getfield(descr, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.AbstractStructPtrInfo): + assert opinfo in self.cached_infos + if isinstance(res, PreambleOp): + if not true_force: + return res.op + res = optheap.optimizer.force_op_from_preamble(res) + opinfo.setfield(descr, None, res, optheap=optheap) + return res + + def invalidate(self, descr): + for opinfo in self.cached_infos: + assert isinstance(opinfo, info.AbstractStructPtrInfo) + opinfo._fields[descr.get_index()] = None + self.cached_infos = [] + self.cached_structs = [] + + +class ArrayCachedItem(AbstractCachedEntry): def __init__(self, index): self.index = index - CachedField.__init__(self) + AbstractCachedEntry.__init__(self) - def _getvalue(self, op): + def _get_rhs_from_set_op(self, op): return op.getarg(2) def _getfield(self, opinfo, descr, optheap, true_force=True): res = opinfo.getitem(descr, self.index, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.ArrayPtrInfo): + assert opinfo in self.cached_infos if (isinstance(res, PreambleOp) and optheap.optimizer.cpu.supports_guard_gc_type): if not true_force: @@ -179,10 +212,10 @@ opinfo.setitem(descr, index, None, res, optheap=optheap) return res - def _setfield(self, op, opinfo, optheap): + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(2)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setitem(op.getdescr(), self.index, struct, arg, self, optheap) + opinfo.setitem(op.getdescr(), self.index, struct, arg, optheap=optheap, cf=self) def invalidate(self, descr): for opinfo in self.cached_infos: @@ -201,15 +234,11 @@ self.postponed_op = None - # XXXX the rest is old - # cached array items: {array descr: {index: CachedField}} - #self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} # cache of corresponding {array descrs: dict 'entries' field descr} self.corresponding_array_descrs = {} # - self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False @@ -221,7 +250,7 @@ def flush(self): self.cached_dict_reads.clear() self.corresponding_array_descrs.clear() - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.emit_postponed_op() def emit_postponed_op(self): @@ -234,7 +263,7 @@ descrkeys = self.cached_fields.keys() if not we_are_translated(): # XXX Pure operation of boxes that are cached in several places will - # only be removed from the peeled loop when red from the first + # only be removed from the peeled loop when read from the first # place discovered here. This is far from ideal, as it makes # the effectiveness of our optimization a bit random. It should # howevere always generate correct results. For tests we dont @@ -249,14 +278,7 @@ d.produce_potential_short_preamble_ops(self.optimizer, sb, descr, index) - def register_dirty_field(self, descr, op, info): - self.field_cache(descr).register_dirty_field(op, info) - - def register_dirty_array_field(self, arraydescr, op, index, info): - self.arrayitem_cache(arraydescr, index).register_dirty_field(op, info) - def clean_caches(self): - del self._lazy_setfields_and_arrayitems[:] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) @@ -285,7 +307,7 @@ try: cf = submap[index] except KeyError: - cf = submap[index] = ArrayCachedField(index) + cf = submap[index] = ArrayCachedItem(index) return cf def emit_operation(self, op): @@ -304,7 +326,7 @@ return if op.is_guard(): self.optimizer.pendingfields = ( - self.force_lazy_setfields_and_arrayitems_for_guard()) + self.force_lazy_sets_for_guard()) return opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or # handled specially @@ -332,7 +354,7 @@ if not effectinfo.has_random_effects(): self.force_from_effectinfo(effectinfo) return - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.clean_caches() def optimize_CALL_I(self, op): @@ -410,7 +432,7 @@ # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) + self.force_lazy_set(fielddescr) for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: @@ -420,7 +442,7 @@ del self.cached_dict_reads[fielddescr] except KeyError: pass - self.force_lazy_setfield(fielddescr, can_cache=False) + self.force_lazy_set(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) if arraydescr in self.corresponding_array_descrs: @@ -431,16 +453,16 @@ pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) + self.force_lazy_set(vrefinfo.descr_forced) # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. - def force_lazy_setfield(self, descr, can_cache=True): + def force_lazy_set(self, descr, can_cache=True): try: cf = self.cached_fields[descr] except KeyError: return - cf.force_lazy_setfield(self, descr, can_cache) + cf.force_lazy_set(self, descr, can_cache) def force_lazy_setarrayitem(self, arraydescr, indexb=None, can_cache=True): try: @@ -449,35 +471,35 @@ return for idx, cf in submap.iteritems(): if indexb is None or indexb.contains(idx): - cf.force_lazy_setfield(self, None, can_cache) + cf.force_lazy_set(self, None, can_cache) - def force_all_lazy_setfields_and_arrayitems(self): + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for submap in self.cached_arrayitems.itervalues(): for index, cf in submap.iteritems(): - cf.force_lazy_setfield(self, None) + cf.force_lazy_set(self, None) - def force_lazy_setfields_and_arrayitems_for_guard(self): + def force_lazy_sets_for_guard(self): pendingfields = [] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue val = op.getarg(1) if self.optimizer.is_virtual(val): pendingfields.append(op) continue - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for descr, submap in self.cached_arrayitems.iteritems(): for index, cf in submap.iteritems(): - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue # the only really interesting case that we need to handle in the @@ -489,7 +511,7 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): @@ -503,7 +525,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, self, cf) + structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I @@ -554,12 +576,12 @@ # default case: produce the operation self.make_nonnull(op.getarg(0)) self.emit_operation(op) - # the remember the result of reading the array item + # then remember the result of reading the array item if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), - self.get_box_replacement(op), cf, - self) + self.get_box_replacement(op), optheap=self, + cf=cf) optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -196,28 +196,28 @@ def all_items(self): return self._fields - def setfield(self, descr, struct, op, optheap=None, cf=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) assert isinstance(op, AbstractValue) - self._fields[descr.get_index()] = op + self._fields[fielddescr.get_index()] = op if cf is not None: assert not self.is_virtual() assert struct is not None - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) - def getfield(self, descr, optheap=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) - return self._fields[descr.get_index()] + def getfield(self, fielddescr, optheap=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) + return self._fields[fielddescr.get_index()] def _force_elements(self, op, optforce, descr): if self._fields is None: return - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETFIELD_GC, [op, subbox], - descr=flddescr) + descr=fielddescr) self._fields[i] = None optforce.emit_operation(setfieldop) @@ -249,16 +249,16 @@ if fieldinfo and fieldinfo.is_virtual(): fieldinfo.visitor_walk_recursive(op, visitor, optimizer) - def produce_short_preamble_ops(self, structbox, descr, index, optimizer, + def produce_short_preamble_ops(self, structbox, fielddescr, index, optimizer, shortboxes): if self._fields is None: return - if descr.get_index() >= len(self._fields): + if fielddescr.get_index() >= len(self._fields): # we don't know about this item return - op = optimizer.get_box_replacement(self._fields[descr.get_index()]) - opnum = OpHelpers.getfield_for_descr(descr) - getfield_op = ResOperation(opnum, [structbox], descr=descr) + op = optimizer.get_box_replacement(self._fields[fielddescr.get_index()]) + opnum = OpHelpers.getfield_for_descr(fielddescr) + getfield_op = ResOperation(opnum, [structbox], descr=fielddescr) shortboxes.add_heap_op(op, getfield_op) def _is_immutable_and_filled_with_constants(self, optimizer, memo=None): @@ -294,12 +294,12 @@ return True def _force_elements_immutable(self, descr, constptr, optforce): - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] subbox = optforce.force_box(fld) assert isinstance(subbox, Const) execute(optforce.optimizer.cpu, None, rop.SETFIELD_GC, - flddescr, constptr, subbox) + fielddescr, constptr, subbox) class InstancePtrInfo(AbstractStructPtrInfo): _attrs_ = ('_known_class',) @@ -505,6 +505,7 @@ info._items = self._items[:] def _force_elements(self, op, optforce, descr): + # XXX descr = op.getdescr() const = optforce.new_const_item(self.descr) for i in range(self.length): @@ -523,15 +524,16 @@ optforce.emit_operation(setop) optforce.pure_from_args(rop.ARRAYLEN_GC, [op], ConstInt(len(self._items))) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): + assert not self.is_virtual() self._items = self._items + [None] * (index - len(self._items) + 1) self._items[index] = op if cf is not None: assert not self.is_virtual() - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) def getitem(self, descr, index, optheap=None): if self._items is None or index >= len(self._items): @@ -626,13 +628,13 @@ i = 0 fielddescrs = op.getdescr().get_all_fielddescrs() for index in range(self.length): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: fld = self._items[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETINTERIORFIELD_GC, [op, ConstInt(index), subbox], - descr=flddescr) + descr=fielddescr) optforce.emit_operation(setfieldop) # heapcache does not work for interiorfields # if it does, we would need a fix here @@ -645,7 +647,7 @@ fielddescrs = self.descr.get_all_fielddescrs() i = 0 for index in range(self.getlength()): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: itemop = self._items[i] if (itemop is not None and not isinstance(itemop, Const)): @@ -691,21 +693,21 @@ optheap.const_infos[ref] = info return info - def getfield(self, descr, optheap=None): - info = self._get_info(descr.get_parent_descr(), optheap) - return info.getfield(descr) + def getfield(self, fielddescr, optheap=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + return info.getfield(fielddescr) def getitem(self, descr, index, optheap=None): info = self._get_array_info(descr, optheap) return info.getitem(descr, index) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): info = self._get_array_info(descr, optheap) - info.setitem(descr, index, struct, op, cf) + info.setitem(descr, index, struct, op, optheap=optheap, cf=cf) - def setfield(self, descr, struct, op, optheap=None, cf=None): - info = self._get_info(descr.get_parent_descr(), optheap) - info.setfield(descr, struct, op, optheap, cf) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + info.setfield(fielddescr, struct, op, optheap=optheap, cf=cf) def is_null(self): return not bool(self._const.getref_base()) diff --git a/rpython/jit/metainterp/optimizeopt/shortpreamble.py b/rpython/jit/metainterp/optimizeopt/shortpreamble.py --- a/rpython/jit/metainterp/optimizeopt/shortpreamble.py +++ b/rpython/jit/metainterp/optimizeopt/shortpreamble.py @@ -81,7 +81,7 @@ assert index >= 0 cf = optheap.arrayitem_cache(descr, index) opinfo.setitem(self.getfield_op.getdescr(), index, self.res, - pop, cf, optheap=optheap) + pop, optheap, cf) def repr(self, memo): return "HeapOp(%s, %s)" % (self.res.repr(memo), diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -696,58 +696,6 @@ # ---------- - def test_virtual_1(self): - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - setfield_gc(p0, i1, descr=valuedescr) - jump(i, p0) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_float(self): - ops = """ - [f, p0] - f0 = getfield_gc(p0, descr=floatdescr) - f1 = float_add(f0, f) - setfield_gc(p0, f1, descr=floatdescr) - jump(f, p0) - """ - expected = """ - [f, f2] - f1 = float_add(f2, f) - jump(f, f1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected) - - def test_virtual_2(self): - py.test.skip("XXX") - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - p1 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p1, i1, descr=valuedescr) - jump(i, p1) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_oois(self): ops = """ [p0, p1, p2] @@ -774,20 +722,6 @@ guard_false(i12) [] jump(p0, p1, p2) """ - expected = """ - [p2] - # all constant-folded :-) - jump(p2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable), - Virtual(node_vtable), - Not''', - expected) - # - # to be complete, we also check the no-opt case where most comparisons - # are not removed. The exact set of comparisons removed depends on - # the details of the algorithm... expected2 = """ [p0, p1, p2] guard_nonnull(p0) [] @@ -801,26 +735,6 @@ """ self.optimize_loop(ops, expected2) - def test_virtual_default_field(self): - py.test.skip("XXX") - ops = """ - [p0] - i0 = getfield_gc(p0, descr=valuedescr) - guard_value(i0, 0) [] - p1 = new_with_vtable(ConstClass(node_vtable)) - # the field 'value' has its default value of 0 - jump(p1) - """ - expected = """ - [i] - guard_value(i, 0) [] - jump(0) - """ - # the 'expected' is sub-optimal, but it should be done by another later - # optimization step. See test_find_nodes_default_field() for why. - self.optimize_loop(ops, 'Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_3(self): ops = """ [i] @@ -837,55 +751,6 @@ """ self.optimize_loop(ops, expected) - def test_virtual_4(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - jump(i3, p1) - """ - expected = """ - [i0, i1] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_5(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p2 = new_with_vtable(descr=nodesize2) - setfield_gc(p2, i1, descr=valuedescr) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - setfield_gc(p1, p2, descr=nextdescr) - jump(i3, p1) - """ - expected = """ - [i0, i1, i1bis] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2, i1) - """ - self.optimize_loop(ops, - '''Not, Virtual(node_vtable, - valuedescr=Not, - nextdescr=Virtual(node_vtable2, - valuedescr=Not))''', - expected) - def test_virtual_constant_isnull(self): ops = """ [i0] @@ -1209,27 +1074,6 @@ """ self.optimize_loop(ops, expected) - def test_varray_2(self): - ops = """ - [i0, p1] - i1 = getarrayitem_gc(p1, 0, descr=arraydescr) - i2 = getarrayitem_gc(p1, 1, descr=arraydescr) - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - p2 = new_array(2, descr=arraydescr) - setarrayitem_gc(p2, 1, i0, descr=arraydescr) - setarrayitem_gc(p2, 0, 20, descr=arraydescr) - jump(i0, p2) - """ - expected = """ - [i0, i1, i2] - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - jump(i0, 20, i0) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr, Not, Not)', expected) - def test_p123_array(self): ops = """ [i1, p2, p3] @@ -1264,23 +1108,6 @@ """ self.optimize_loop(ops, expected) - def test_vstruct_1(self): - py.test.skip("XXX") - ops = """ - [i1, p2] - i2 = getfield_gc(p2, descr=adescr) - escape_n(i2) - p3 = new(descr=ssize) - setfield_gc(p3, i1, descr=adescr) - jump(i1, p3) - """ - expected = """ - [i1, i2] - escape_n(i2) - jump(i1, i1) - """ - self.optimize_loop(ops, 'Not, VStruct(ssize, adescr=Not)', expected) - def test_p123_vstruct(self): ops = """ [i1, p2, p3] @@ -1443,26 +1270,6 @@ """ self.optimize_loop(ops, expected) - def test_duplicate_getfield_guard_value_const(self): - ops = """ - [p1] - guard_value(p1, ConstPtr(myptr)) [] - i1 = getfield_gc_i(p1, descr=valuedescr) - i2 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i2) - jump(p1) - """ - expected = """ - [] - i1 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i1) - jump() - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr)', expected) - def test_duplicate_getfield_sideeffects_1(self): ops = """ [p1] @@ -1688,12 +1495,12 @@ jump(p1, i1, i2) """ expected = """ - [i1, i2] + [p1, i1, i2] + guard_value(p1, ConstPtr(myptr)) [] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) - jump(i1, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr), Not, Not', expected) + jump(ConstPtr(myptr), i1, i2) + """ + self.optimize_loop(ops, expected) def test_duplicate_getarrayitem_1(self): ops = """ @@ -1870,163 +1677,7 @@ """ self.optimize_loop(ops, expected) - def test_bug_1(self): - ops = """ - [i0, p1] - p4 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_with_vtable(descr=nodesize) - p3 = escape_r() - setfield_gc(p2, p3, descr=nextdescr) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, nextdescr=Not)', - expected) - - def test_bug_2(self): - ops = """ - [i0, p1] - p4 = getarrayitem_gc(p1, 0, descr=arraydescr2) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_array(1, descr=arraydescr2) - p3 = escape_r() - setarrayitem_gc(p2, 0, p3, descr=arraydescr2) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr2, Not)', - expected) - - def test_bug_3(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - p2a = new_with_vtable(descr=nodesize) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_bug_3bis(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_invalid_loop_1(self): - ops = """ - [p1] - guard_isnull(p1) [] - # - p2 = new_with_vtable(descr=nodesize) - jump(p2) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, - ops, 'Virtual(node_vtable)', None) - - def test_invalid_loop_2(self): - py.test.skip("this would fail if we had Fixed again in the specnodes") - ops = """ - [p1] - guard_class(p1, ConstClass(node_vtable2)) [] - # - p2 = new_with_vtable(descr=nodesize) - escape_n(p2) # prevent it from staying Virtual - jump(p2) - """ - py.test.raises(InvalidLoop, self.optimize_loop, - ops, '...', None) - - def test_invalid_loop_3(self): - ops = """ - [p1] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_isnull(p2) [] - # - p3 = new_with_vtable(descr=nodesize) - p4 = new_with_vtable(descr=nodesize) - setfield_gc(p3, p4, descr=nextdescr) - jump(p3) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, ops, - 'Virtual(node_vtable, nextdescr=Virtual(node_vtable))', - None) - def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -2060,7 +1711,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2078,7 +1728,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2625,26 +2274,6 @@ where p2 is a node_vtable, valuedescr=i2, nextdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_6(self): - ops = """ - [p0, i0, i1] - guard_true(i0) [p0] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i1, i1) - """ - expected = """ - [i1b, i0, i1] - guard_true(i0) [i1b] - jump(i1, i1, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable, valuedescr=Not), - Not, Not''', expected) - self.check_expanded_fail_descr('''p0 - where p0 is a node_vtable, valuedescr=i1b - ''', rop.GUARD_TRUE) - def test_expand_fail_varray(self): ops = """ [i1] @@ -2686,47 +2315,6 @@ where p2 is a vstruct ssize, adescr=i1, bdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_v_all_1(self): - ops = """ - [i1, p1a, i2] - p6s = getarrayitem_gc(p1a, 0, descr=arraydescr2) - p7v = getfield_gc_r(p6s, descr=bdescr) - p5s = new(descr=ssize) - setfield_gc(p5s, i2, descr=adescr) - setfield_gc(p5s, p7v, descr=bdescr) - setarrayitem_gc(p1a, 1, p5s, descr=arraydescr2) - guard_true(i1) [p1a] - p2s = new(descr=ssize) - p3v = new_with_vtable(descr=nodesize) - p4a = new_array(2, descr=arraydescr2) - setfield_gc(p2s, i1, descr=adescr) - setfield_gc(p2s, p3v, descr=bdescr) - setfield_gc(p3v, i2, descr=valuedescr) - setarrayitem_gc(p4a, 0, p2s, descr=arraydescr2) - jump(i1, p4a, i2) - """ - expected = """ - [i1, ia, iv, pnull, i2] - guard_true(i1) [ia, iv, i2] - jump(1, 1, i2, NULL, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, ''' - Not, - VArray(arraydescr2, - VStruct(ssize, - adescr=Not, - bdescr=Virtual(node_vtable, - valuedescr=Not)), - Not), - Not''', expected) - self.check_expanded_fail_descr('''p1a - where p1a is a varray arraydescr2: p6s, p5s - where p6s is a vstruct ssize, adescr=ia, bdescr=p7v - where p5s is a vstruct ssize, adescr=i2, bdescr=p7v - where p7v is a node_vtable, valuedescr=iv - ''', rop.GUARD_TRUE) - def test_expand_fail_lazy_setfield_1(self): ops = """ [p1, i2, i3] @@ -5179,6 +4767,8 @@ """ self.optimize_loop(ops, expected) + def test_intmod_bounds_harder(self): + py.test.skip("harder") # Of course any 'maybe-negative % power-of-two' can be turned into # int_and(), but that's a bit harder to detect here because it turns # into several operations, and of course it is wrong to just turn @@ -5196,7 +4786,6 @@ i4 = int_and(i0, 15) finish(i4) """ - py.test.skip("harder") self.optimize_loop(ops, expected) def test_intmod_bounds_bug1(self): @@ -5357,7 +4946,7 @@ i5 = int_lt(i2, i1) guard_true(i5) [] - i6 = getarrayitem_gc(p0, i2) + i6 = getarrayitem_gc_i(p0, i2, descr=chararraydescr) finish(i6) """ expected = """ @@ -5368,7 +4957,7 @@ i4 = int_lt(i2, i0) guard_true(i4) [] - i6 = getarrayitem_gc(p0, i3) + i6 = getarrayitem_gc_i(p0, i3, descr=chararraydescr) finish(i6) """ self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -2969,7 +2969,6 @@ assert "promote of a virtual" in exc.msg def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -3015,7 +3014,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -3039,7 +3037,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -35,15 +35,11 @@ return True return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except lltype.DelayedPointer: + def analyze_external_call(self, funcobj, seen=None): + if funcobj.random_effects_on_gcobjs: return True - if getattr(funcobj, 'random_effects_on_gcobjs', False): - return True - return graphanalyze.BoolGraphAnalyzer.analyze_external_call(self, op, - seen) + return graphanalyze.BoolGraphAnalyzer.analyze_external_call( + self, funcobj, seen) def analyze_simple_operation(self, op, graphinfo): if op.opname in ('malloc', 'malloc_varsize'): flags = op.args[1].value diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py --- a/rpython/rlib/rstacklet.py +++ b/rpython/rlib/rstacklet.py @@ -1,7 +1,7 @@ import sys from rpython.rlib import _rffi_stacklet as _c from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import fetch_translated_config from rpython.rtyper.lltypesystem import lltype, llmemory DEBUG = False @@ -10,8 +10,8 @@ class StackletThread(object): @jit.dont_look_inside - def __init__(self, config): - self._gcrootfinder = _getgcrootfinder(config, we_are_translated()) + def __init__(self, _argument_ignored_for_backward_compatibility=None): + self._gcrootfinder = _getgcrootfinder(fetch_translated_config()) self._thrd = _c.newthread() if not self._thrd: raise MemoryError @@ -67,11 +67,8 @@ # ____________________________________________________________ -def _getgcrootfinder(config, translated): - if translated: - assert config is not None, ("you have to pass a valid config, " - "e.g. from 'driver.config'") - elif '__pypy__' in sys.builtin_module_names: +def _getgcrootfinder(config): + if config is None and '__pypy__' in sys.builtin_module_names: import py py.test.skip("cannot run the stacklet tests on top of pypy: " "calling directly the C function stacklet_switch() " diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py --- a/rpython/rlib/test/test_rstacklet.py +++ b/rpython/rlib/test/test_rstacklet.py @@ -17,10 +17,9 @@ class Runner: STATUSMAX = 5000 - config = None def init(self, seed): - self.sthread = rstacklet.StackletThread(self.config) + self.sthread = rstacklet.StackletThread() self.random = rrandom.Random(seed) def done(self): @@ -301,12 +300,11 @@ config.translation.gcrootfinder = cls.gcrootfinder GCROOTFINDER = cls.gcrootfinder cls.config = config - cls.old_values = Runner.config, Runner.STATUSMAX - Runner.config = config + cls.old_status_max = Runner.STATUSMAX Runner.STATUSMAX = 25000 def teardown_class(cls): - Runner.config, Runner.STATUSMAX = cls.old_values + Runner.STATUSMAX = cls.old_status_max def test_demo1(self): t, cbuilder = self.compile(entry_point) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -22,7 +22,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, - Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, + Ptr, ContainerType, FuncType, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive, getfunctionptr) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError from rpython.rtyper import rclass @@ -876,18 +876,6 @@ return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) - def genexternalcall(self, fnname, args_v, resulttype=None, **flags): - if isinstance(resulttype, Repr): - resulttype = resulttype.lowleveltype - argtypes = [v.concretetype for v in args_v] - FUNCTYPE = FuncType(argtypes, resulttype or Void) - f = functionptr(FUNCTYPE, fnname, **flags) - cf = inputconst(typeOf(f), f) - return self.genop('direct_call', [cf]+list(args_v), resulttype) - - def gencapicall(self, cfnname, args_v, resulttype=None, **flags): - return self.genexternalcall(cfnname, args_v, resulttype=resulttype, external="CPython", **flags) - def genconst(self, ll_value): return inputconst(typeOf(ll_value), ll_value) diff --git a/rpython/translator/backendopt/canraise.py b/rpython/translator/backendopt/canraise.py --- a/rpython/translator/backendopt/canraise.py +++ b/rpython/translator/backendopt/canraise.py @@ -22,8 +22,7 @@ log.WARNING("Unknown operation: %s" % op.opname) return True - def analyze_external_call(self, op, seen=None): - fnobj = op.args[0].value._obj + def analyze_external_call(self, fnobj, seen=None): return getattr(fnobj, 'canraise', True) analyze_exceptblock = None # don't call this diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py --- a/rpython/translator/backendopt/gilanalysis.py +++ b/rpython/translator/backendopt/gilanalysis.py @@ -21,12 +21,8 @@ self, graph, seen) def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj - if getattr(funcobj, 'transactionsafe', False): - return False - else: - return False - + return False + def analyze_simple_operation(self, op, graphinfo): return False diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,5 +1,4 @@ from rpython.rtyper.lltypesystem.lltype import DelayedPointer -from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -55,11 +54,7 @@ def analyze_startblock(self, block, seen=None): return self.bottom_result() - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except DelayedPointer: - return self.bottom_result() + def analyze_external_call(self, funcobj, seen=None): result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper @@ -80,12 +75,24 @@ def analyze(self, op, seen=None, graphinfo=None): if op.opname == "direct_call": - graph = get_graph(op.args[0], self.translator) - if graph is None: - x = self.analyze_external_call(op, seen) + try: + funcobj = op.args[0].value._obj + except DelayedPointer: + return self.top_result() + if funcobj is None: + # We encountered a null pointer. Calling it will crash. + # However, the call could be on a dead path, so we return the + # bottom result here. + return self.bottom_result() + if getattr(funcobj, 'external', None) is not None: + x = self.analyze_external_call(funcobj, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x + try: + graph = funcobj.graph + except AttributeError: + return self.top_result() x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) diff --git a/rpython/translator/backendopt/test/test_canraise.py b/rpython/translator/backendopt/test/test_canraise.py --- a/rpython/translator/backendopt/test/test_canraise.py +++ b/rpython/translator/backendopt/test/test_canraise.py @@ -204,8 +204,7 @@ result = ra.can_raise(fgraph.startblock.operations[0]) assert not result - z = lltype.functionptr(lltype.FuncType([lltype.Signed], lltype.Signed), - 'foobar') + z = llexternal('z', [lltype.Signed], lltype.Signed) def g(x): return z(x) t, ra = self.translate(g, [int]) diff --git a/rpython/translator/backendopt/test/test_graphanalyze.py b/rpython/translator/backendopt/test/test_graphanalyze.py --- a/rpython/translator/backendopt/test/test_graphanalyze.py +++ b/rpython/translator/backendopt/test/test_graphanalyze.py @@ -1,7 +1,7 @@ import random from rpython.tool.algo.unionfind import UnionFind -from rpython.translator.backendopt.graphanalyze import Dependency -from rpython.translator.backendopt.graphanalyze import DependencyTracker +from rpython.translator.backendopt.graphanalyze import (Dependency, + DependencyTracker, BoolGraphAnalyzer) class FakeGraphAnalyzer: @@ -49,3 +49,30 @@ method1 = rectrack(n, tracker) method2 = expected(n) assert method1 == method2 + + +def test_delayed_fnptr(): + from rpython.flowspace.model import SpaceOperation + from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator + from rpython.translator.translator import TranslationContext + t = TranslationContext() + t.buildannotator() + t.buildrtyper() + annhelper = MixLevelHelperAnnotator(t.rtyper) + def f(): + pass + c_f = annhelper.constfunc(f, [], None) + op = SpaceOperation('direct_call', [c_f], None) + analyzer = BoolGraphAnalyzer(t) + assert analyzer.analyze(op) + + +def test_null_fnptr(): + from rpython.flowspace.model import SpaceOperation, Constant + from rpython.rtyper.lltypesystem.lltype import Void, FuncType, nullptr + from rpython.translator.translator import TranslationContext + t = TranslationContext() + fnptr = nullptr(FuncType([], Void)) + op = SpaceOperation('direct_call', [Constant(fnptr)], None) + analyzer = BoolGraphAnalyzer(t) + assert not analyzer.analyze(op) diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -24,22 +24,13 @@ if not isinstance(f, lltype._ptr): return None try: - funcobj = f._getobj() + funcobj = f._obj except lltype.DelayedPointer: return None try: - callable = funcobj._callable - except (AttributeError, KeyError, AssertionError): - return None - try: return funcobj.graph except AttributeError: return None - try: - callable = funcobj._callable - return translator._graphof(callable) - except (AttributeError, KeyError, AssertionError): - return None def replace_exitswitch_by_constant(block, const): From pypy.commits at gmail.com Fri Jan 15 10:01:14 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 07:01:14 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: Forgot to "hg add" these new tests Message-ID: <569909ba.cf821c0a.68ca1.0d64@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81802:0129e05ba29e Date: 2016-01-15 15:14 +0100 http://bitbucket.org/pypy/pypy/changeset/0129e05ba29e/ Log: Forgot to "hg add" these new tests diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py @@ -0,0 +1,1 @@ +# Generated by pypy/tool/import_cffi.py diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + import sys, time + sys.stdout.write("preparing") + for i in range(3): + sys.stdout.flush() + time.sleep(0.02) + sys.stdout.write(".") + sys.stdout.write("\n") + + from _add1_cffi import ffi + + int(ord("A")) # check that built-ins are there + + @ffi.def_extern() + def add1(x, y): + sys.stdout.write("adding %d and %d\n" % (x, y)) + sys.stdout.flush() + return x + y +""") + +ffi.set_source("_add1_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py @@ -0,0 +1,30 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add2(int, int, int); +""") + +ffi.embedding_init_code(r""" + import sys + sys.stdout.write("prepADD2\n") + + assert '_add2_cffi' in sys.modules + m = sys.modules['_add2_cffi'] + import _add2_cffi + ffi = _add2_cffi.ffi + + @ffi.def_extern() + def add2(x, y, z): + sys.stdout.write("adding %d and %d and %d\n" % (x, y, z)) + sys.stdout.flush() + return x + y + z +""") + +ffi.set_source("_add2_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py @@ -0,0 +1,25 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add3(int, int, int, int); +""") + +ffi.embedding_init_code(r""" + from _add3_cffi import ffi + import sys + + @ffi.def_extern() + def add3(x, y, z, t): + sys.stdout.write("adding %d, %d, %d, %d\n" % (x, y, z, t)) + sys.stdout.flush() + return x + y + z + t +""") + +ffi.set_source("_add3_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int (*my_callback)(int); + int add_rec(int, int); +""") + +ffi.embedding_init_code(r""" + from _add_recursive_cffi import ffi, lib + import sys + print("preparing REC") + sys.stdout.flush() + + @ffi.def_extern() + def add_rec(x, y): + print("adding %d and %d" % (x, y)) + sys.stdout.flush() + return x + y + + x = lib.my_callback(400) + print('<<< %d >>>' % (x,)) +""") + +ffi.set_source("_add_recursive_cffi", """ +/* use CFFI_DLLEXPORT: on windows, it expands to __declspec(dllexport), + which is needed to export a variable from a dll */ +CFFI_DLLEXPORT int (*my_callback)(int); +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py @@ -0,0 +1,22 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + from _perf_cffi import ffi + + @ffi.def_extern() + def add1(x, y): + return x + y +""") + +ffi.set_source("_perf_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -0,0 +1,151 @@ +# Generated by pypy/tool/import_cffi.py +import py +import sys, os, re +import shutil, subprocess, time +from pypy.module.test_lib_pypy.cffi_tests.udir import udir +import cffi + +if hasattr(sys, 'gettotalrefcount'): + py.test.skip("tried hard and failed to have these tests run " + "in a debug-mode python") + + +local_dir = os.path.dirname(os.path.abspath(__file__)) +_link_error = '?' + +def check_lib_python_found(tmpdir): + global _link_error + if _link_error == '?': + ffi = cffi.FFI() + kwds = {} + ffi._apply_embedding_fix(kwds) + ffi.set_source("_test_lib_python_found", "", **kwds) + try: + ffi.compile(tmpdir=tmpdir) + except cffi.VerificationError as e: + _link_error = e + else: + _link_error = None + if _link_error: + py.test.skip(str(_link_error)) + + +class EmbeddingTests: + _compiled_modules = {} + + def setup_method(self, meth): + check_lib_python_found(str(udir.ensure('embedding', dir=1))) + self._path = udir.join('embedding', meth.__name__) + if sys.platform == "win32": + self._compiled_modules.clear() # workaround + + def get_path(self): + return str(self._path.ensure(dir=1)) + + def _run(self, args, env=None): + print(args) + popen = subprocess.Popen(args, env=env, cwd=self.get_path(), + stdout=subprocess.PIPE, + universal_newlines=True) + output = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("popen failed with exit code %r: %r" % ( + err, args)) + print(output.rstrip()) + return output + + def prepare_module(self, name): + if name not in self._compiled_modules: + path = self.get_path() + filename = '%s.py' % name + # NOTE: if you have an .egg globally installed with an older + # version of cffi, this will not work, because sys.path ends + # up with the .egg before the PYTHONPATH entries. I didn't + # find a solution to that: we could hack sys.path inside the + # script run here, but we can't hack it in the same way in + # execute(). + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + output = self._run([sys.executable, os.path.join(local_dir, filename)], + env=env) + match = re.compile(r"\bFILENAME: (.+)").search(output) + assert match + dynamic_lib_name = match.group(1) + if sys.platform == 'win32': + assert dynamic_lib_name.endswith('_cffi.dll') + else: + assert dynamic_lib_name.endswith('_cffi.so') + self._compiled_modules[name] = dynamic_lib_name + return self._compiled_modules[name] + + def compile(self, name, modules, opt=False, threads=False, defines={}): + path = self.get_path() + filename = '%s.c' % name + shutil.copy(os.path.join(local_dir, filename), path) + shutil.copy(os.path.join(local_dir, 'thread-test.h'), path) + import distutils.ccompiler + curdir = os.getcwd() + try: + os.chdir(self.get_path()) + c = distutils.ccompiler.new_compiler() + print('compiling %s with %r' % (name, modules)) + extra_preargs = [] + if sys.platform == 'win32': + libfiles = [] + for m in modules: + m = os.path.basename(m) + assert m.endswith('.dll') + libfiles.append('Release\\%s.lib' % m[:-4]) + modules = libfiles + elif threads: + extra_preargs.append('-pthread') + objects = c.compile([filename], macros=sorted(defines.items()), debug=True) + c.link_executable(objects + modules, name, extra_preargs=extra_preargs) + finally: + os.chdir(curdir) + + def execute(self, name): + path = self.get_path() + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + libpath = env.get('LD_LIBRARY_PATH') + if libpath: + libpath = path + ':' + libpath + else: + libpath = path + env['LD_LIBRARY_PATH'] = libpath + print('running %r in %r' % (name, path)) + executable_name = name + if sys.platform == 'win32': + executable_name = os.path.join(path, executable_name + '.exe') + popen = subprocess.Popen([executable_name], cwd=path, env=env, + stdout=subprocess.PIPE, + universal_newlines=True) + result = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("%r failed with exit code %r" % (name, err)) + return result + + +class TestBasic(EmbeddingTests): + def test_basic(self): + add1_cffi = self.prepare_module('add1') + self.compile('add1-test', [add1_cffi]) + output = self.execute('add1-test') + assert output == ("preparing...\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") + + def test_two_modules(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('add2-test', [add1_cffi, add2_cffi]) + output = self.execute('add2-test') + assert output == ("preparing...\n" + "adding 40 and 2\n" + "prepADD2\n" + "adding 100 and -5 and -20\n" + "got: 42 75\n") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py @@ -0,0 +1,53 @@ +# Generated by pypy/tool/import_cffi.py +import sys +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + +if sys.platform == 'win32': + import py + py.test.skip("written with POSIX functions") + + +class TestPerformance(EmbeddingTests): + def test_perf_single_threaded(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_1_thread(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '1'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_2_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '2'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_4_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '4'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_8_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '8'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py @@ -0,0 +1,16 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class TestRecursive(EmbeddingTests): + def test_recursive(self): + add_recursive_cffi = self.prepare_module('add_recursive') + self.compile('add_recursive-test', [add_recursive_cffi]) + output = self.execute('add_recursive-test') + assert output == ("preparing REC\n" + "some_callback(400)\n" + "adding 400 and 9\n" + "<<< 409 >>>\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py @@ -0,0 +1,62 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class TestThread(EmbeddingTests): + def test_first_calls_in_parallel(self): + add1_cffi = self.prepare_module('add1') + self.compile('thread1-test', [add1_cffi], threads=True) + for i in range(50): + output = self.execute('thread1-test') + assert output == ("starting\n" + "preparing...\n" + + "adding 40 and 2\n" * 10 + + "done\n") + + def _take_out(self, text, content): + assert content in text + i = text.index(content) + return text[:i] + text[i+len(content):] + + def test_init_different_modules_in_different_threads(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('thread2-test', [add1_cffi, add2_cffi], threads=True) + output = self.execute('thread2-test') + output = self._take_out(output, "preparing") + output = self._take_out(output, ".") + output = self._take_out(output, ".") + # at least the 3rd dot should be after everything from ADD2 + assert output == ("starting\n" + "prepADD2\n" + "adding 1000 and 200 and 30\n" + ".\n" + "adding 40 and 2\n" + "done\n") + + def test_alt_issue(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('thread2-test', [add1_cffi, add2_cffi], + threads=True, defines={'T2TEST_AGAIN_ADD1': '1'}) + output = self.execute('thread2-test') + output = self._take_out(output, "adding 40 and 2\n") + assert output == ("starting\n" + "preparing...\n" + "adding -1 and -1\n" + "prepADD2\n" + "adding 1000 and 200 and 30\n" + "done\n") + + def test_load_in_parallel_more(self): + add2_cffi = self.prepare_module('add2') + add3_cffi = self.prepare_module('add3') + self.compile('thread3-test', [add2_cffi, add3_cffi], threads=True) + for i in range(150): + output = self.execute('thread3-test') + for j in range(10): + output = self._take_out(output, "adding 40 and 2 and 100\n") + output = self._take_out(output, "adding 1000, 200, 30, 4\n") + assert output == ("starting\n" + "prepADD2\n" + "done\n") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py @@ -0,0 +1,11 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class TestThreadLocal(EmbeddingTests): + def test_thread_local(self): + tlocal_cffi = self.prepare_module('tlocal') + self.compile('tlocal-test', [tlocal_cffi], threads=True) + for i in range(10): + output = self.execute('tlocal-test') + assert output == "done\n" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + from _tlocal_cffi import ffi + import itertools + try: + import thread + g_seen = itertools.count().next + except ImportError: + import _thread as thread # py3 + g_seen = itertools.count().__next__ + tloc = thread._local() + + @ffi.def_extern() + def add1(x, y): + try: + num = tloc.num + except AttributeError: + num = tloc.num = g_seen() * 1000 + return x + y + num +""") + +ffi.set_source("_tlocal_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) From pypy.commits at gmail.com Fri Jan 15 10:01:15 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 07:01:15 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: redirect readers from embedding.rst to cffi's own embedding.rst Message-ID: <569909bb.c4b61c0a.351b2.0cbf@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81803:29ee59af043d Date: 2016-01-15 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/29ee59af043d/ Log: redirect readers from embedding.rst to cffi's own embedding.rst diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -10,6 +10,15 @@ with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in recent versions of PyPy. +.. note:: + + The interface described in this page is kept for backward compatibility. + From PyPy 4.1, it is recommended to use instead CFFI's `native embedding + support,`__ which gives a simpler approach that works on CPython as well + as PyPy. + +.. __: http://cffi.readthedocs.org/en/latest/embedding.html + The resulting shared library exports very few functions, however they are enough to accomplish everything you need, provided you follow a few principles. The API is: From pypy.commits at gmail.com Fri Jan 15 10:54:29 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 07:54:29 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: added what is new entry Message-ID: <56991635.a867c20a.88a60.15b7@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81804:f4e332454659 Date: 2016-01-15 16:53 +0100 http://bitbucket.org/pypy/pypy/changeset/f4e332454659/ Log: added what is new entry diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -110,3 +110,7 @@ short-running Python callbacks. (CFFI on CPython has a hack to achieve the same result.) This can also be seen as a bug fix: previously, thread-local objects would be reset between two such calls. + +.. branch: memop-simplify3 + +Further simplifying the backend operations malloc_cond_varsize and zero_array. From pypy.commits at gmail.com Fri Jan 15 10:56:13 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 07:56:13 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged memop simplify, added whatsnew entry to this branch Message-ID: <5699169d.878e1c0a.5ceee.1e82@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81805:a326280c5568 Date: 2016-01-15 16:55 +0100 http://bitbucket.org/pypy/pypy/changeset/a326280c5568/ Log: merged memop simplify, added whatsnew entry to this branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -110,3 +110,11 @@ short-running Python callbacks. (CFFI on CPython has a hack to achieve the same result.) This can also be seen as a bug fix: previously, thread-local objects would be reset between two such calls. + +.. branch: memop-simplify3 + +Further simplifying the backend operations malloc_cond_varsize and zero_array. + +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. From pypy.commits at gmail.com Fri Jan 15 11:00:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 15 Jan 2016 08:00:09 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: do not assert, but check if it is a CallDescr, in the case call_assembler, assume the default word size Message-ID: <56991789.c8b3c20a.e79c4.2a8a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81806:2b0a2ef88c35 Date: 2016-01-15 16:59 +0100 http://bitbucket.org/pypy/pypy/changeset/2b0a2ef88c35/ Log: do not assert, but check if it is a CallDescr, in the case call_assembler, assume the default word size diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -24,8 +24,7 @@ type = INT size = WORD self.ressign = True - if calldescr is not None: - assert isinstance(calldescr, CallDescr) + if calldescr is not None and isinstance(calldescr, CallDescr) type = calldescr.get_result_type() size = calldescr.get_result_size() self.ressign = calldescr.is_result_signed() From pypy.commits at gmail.com Fri Jan 15 11:33:21 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 08:33:21 -0800 (PST) Subject: [pypy-commit] cffi default: fixes for pypy Message-ID: <56991f51.d69c1c0a.e95bf.3328@mx.google.com> Author: Armin Rigo Branch: Changeset: r2590:169e377c8437 Date: 2016-01-15 17:33 +0100 http://bitbucket.org/cffi/cffi/changeset/169e377c8437/ Log: fixes for pypy diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -545,6 +545,12 @@ def _apply_embedding_fix(self, kwds): # must include an argument like "-lpython2.7" for the compiler if '__pypy__' in sys.builtin_module_names: + if hasattr(sys, 'prefix'): + import os + libdir = os.path.join(sys.prefix, 'bin') + dirs = kwds.setdefault('library_dirs', []) + if libdir not in dirs: + dirs.append(libdir) pythonlib = "pypy-c" else: if sys.platform == "win32": @@ -557,9 +563,9 @@ (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) if hasattr(sys, 'abiflags'): pythonlib += sys.abiflags - libraries = kwds.get('libraries', []) + libraries = kwds.setdefault('libraries', []) if pythonlib not in libraries: - kwds['libraries'] = libraries + [pythonlib] + libraries.append(pythonlib) def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -20,7 +20,7 @@ ffi._apply_embedding_fix(kwds) ffi.set_source("_test_lib_python_found", "", **kwds) try: - ffi.compile(tmpdir=tmpdir) + ffi.compile(tmpdir=tmpdir, verbose=True) except cffi.VerificationError as e: _link_error = e else: From pypy.commits at gmail.com Fri Jan 15 11:33:53 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 08:33:53 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: update to cffi/abd64e2e97cb Message-ID: <56991f71.c2351c0a.2fdde.309a@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81807:60202d36307e Date: 2016-01-15 14:24 +0000 http://bitbucket.org/pypy/pypy/changeset/60202d36307e/ Log: update to cffi/abd64e2e97cb diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 5): - py.test.skip("re-enable me in version 1.5") + if __version_info__ < (1, 6): + py.test.skip("re-enable me in version 1.6") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " From pypy.commits at gmail.com Fri Jan 15 11:33:55 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 08:33:55 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: Add missing files Message-ID: <56991f73.17941c0a.e7f46.2ed4@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81808:41c7ca112116 Date: 2016-01-15 16:14 +0000 http://bitbucket.org/pypy/pypy/changeset/41c7ca112116/ Log: Add missing files diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c @@ -0,0 +1,13 @@ +#include + +extern int add1(int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add1(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c @@ -0,0 +1,14 @@ +#include + +extern int add1(int, int); +extern int add2(int, int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add2(100, -5, -20); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c @@ -0,0 +1,27 @@ +#include + +#ifdef _MSC_VER +# define DLLIMPORT __declspec(dllimport) +#else +# define DLLIMPORT extern +#endif + +DLLIMPORT int add_rec(int, int); +DLLIMPORT int (*my_callback)(int); + +static int some_callback(int x) +{ + printf("some_callback(%d)\n", x); + fflush(stdout); + return add_rec(x, 9); +} + +int main(void) +{ + int x, y; + my_callback = some_callback; + x = add_rec(40, 2); + y = add_rec(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c @@ -0,0 +1,86 @@ +#include +#include +#include +#ifdef PTEST_USE_THREAD +# include +# include +static sem_t done; +#endif + + +extern int add1(int, int); + + +static double time_delta(struct timeval *stop, struct timeval *start) +{ + return (stop->tv_sec - start->tv_sec) + + 1e-6 * (stop->tv_usec - start->tv_usec); +} + +static double measure(void) +{ + long long i, iterations; + int result; + struct timeval start, stop; + double elapsed; + + add1(0, 0); /* prepare off-line */ + + i = 0; + iterations = 1000; + result = gettimeofday(&start, NULL); + assert(result == 0); + + while (1) { + for (; i < iterations; i++) { + add1(((int)i) & 0xaaaaaa, ((int)i) & 0x555555); + } + result = gettimeofday(&stop, NULL); + assert(result == 0); + + elapsed = time_delta(&stop, &start); + assert(elapsed >= 0.0); + if (elapsed > 2.5) + break; + iterations = iterations * 3 / 2; + } + + return elapsed / (double)iterations; +} + +static void *start_routine(void *arg) +{ + double t = measure(); + printf("time per call: %.3g\n", t); + +#ifdef PTEST_USE_THREAD + int status = sem_post(&done); + assert(status == 0); +#endif + + return arg; +} + + +int main(void) +{ +#ifndef PTEST_USE_THREAD + start_routine(0); +#else + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + add1(0, 0); /* this is the main thread */ + + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = sem_wait(&done); + assert(status == 0); + } +#endif + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h @@ -0,0 +1,62 @@ +/************************************************************/ +#ifndef _MSC_VER +/************************************************************/ + + +#include +#include + + +/************************************************************/ +#else +/************************************************************/ + + +/* Very quick and dirty, just what I need for these tests. + Don't use directly in any real code! +*/ + +#include +#include + +typedef HANDLE sem_t; +typedef HANDLE pthread_t; + +int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + assert(value == 0); + *sem = CreateSemaphore(NULL, 0, 999, NULL); + return *sem ? 0 : -1; +} + +int sem_post(sem_t *sem) +{ + return ReleaseSemaphore(*sem, 1, NULL) ? 0 : -1; +} + +int sem_wait(sem_t *sem) +{ + WaitForSingleObject(*sem, INFINITE); + return 0; +} + +DWORD WINAPI myThreadProc(LPVOID lpParameter) +{ + void *(* start_routine)(void *) = (void *(*)(void *))lpParameter; + start_routine(NULL); + return 0; +} + +int pthread_create(pthread_t *thread, void *attr, + void *start_routine(void *), void *arg) +{ + assert(arg == NULL); + *thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL); + return *thread ? 0 : -1; +} + + +/************************************************************/ +#endif +/************************************************************/ diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c @@ -0,0 +1,43 @@ +#include +#include +#include "thread-test.h" + +#define NTHREADS 10 + + +extern int add1(int, int); + +static sem_t done; + + +static void *start_routine(void *arg) +{ + int x, status; + x = add1(40, 2); + assert(x == 42); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + fflush(stdout); + for (i = 0; i < NTHREADS; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < NTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c @@ -0,0 +1,57 @@ +#include +#include +#include "thread-test.h" + +extern int add1(int, int); +extern int add2(int, int, int); + +static sem_t done; + + +static void *start_routine_1(void *arg) +{ + int x, status; + x = add1(40, 2); + assert(x == 42); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +static void *start_routine_2(void *arg) +{ + int x, status; +#ifdef T2TEST_AGAIN_ADD1 + add1(-1, -1); +#endif + x = add2(1000, 200, 30); + assert(x == 1230); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + fflush(stdout); + status = pthread_create(&th, NULL, start_routine_1, NULL); + assert(status == 0); + status = pthread_create(&th, NULL, start_routine_2, NULL); + assert(status == 0); + + for (i = 0; i < 2; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c @@ -0,0 +1,55 @@ +#include +#include +#include "thread-test.h" + +extern int add2(int, int, int); +extern int add3(int, int, int, int); + +static sem_t done; + + +static void *start_routine_2(void *arg) +{ + int x, status; + x = add2(40, 2, 100); + assert(x == 142); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +static void *start_routine_3(void *arg) +{ + int x, status; + x = add3(1000, 200, 30, 4); + assert(x == 1234); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + fflush(stdout); + for (i = 0; i < 10; i++) { + status = pthread_create(&th, NULL, start_routine_2, NULL); + assert(status == 0); + status = pthread_create(&th, NULL, start_routine_3, NULL); + assert(status == 0); + } + for (i = 0; i < 20; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c @@ -0,0 +1,47 @@ +#include +#include +#include "thread-test.h" + +#define NTHREADS 10 + + +extern int add1(int, int); + +static sem_t done; + + +static void *start_routine(void *arg) +{ + int i, x, expected, status; + + expected = add1(40, 2); + assert((expected % 1000) == 42); + + for (i=0; i<10; i++) { + x = add1(50, i); + assert(x == expected + 8 + i); + } + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + for (i = 0; i < NTHREADS; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < NTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} From pypy.commits at gmail.com Fri Jan 15 11:33:57 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 08:33:57 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: fix fix fix Message-ID: <56991f75.c9bfc20a.26c68.3d29@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81809:4def39652ed6 Date: 2016-01-15 16:35 +0000 http://bitbucket.org/pypy/pypy/changeset/4def39652ed6/ Log: fix fix fix diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -685,13 +685,17 @@ # the previous version of this code did. This should work for # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - so_ext = _get_c_extension_suffix() + # ".pypy-VERSION.so" instead. Note a further tweak for cffi's + # embedding mode: if EXT_SUFFIX is also defined, use that + # directly. + so_ext = get_config_var('EXT_SUFFIX') if so_ext is None: - so_ext = get_config_var('SO') # fall-back - # extensions in debug_mode are named 'module_d.pyd' under windows - if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back + # extensions in debug_mode are named 'module_d.pyd' under windows + if os.name == 'nt' and self.debug: + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -545,6 +545,12 @@ def _apply_embedding_fix(self, kwds): # must include an argument like "-lpython2.7" for the compiler if '__pypy__' in sys.builtin_module_names: + if hasattr(sys, 'prefix'): + import os + libdir = os.path.join(sys.prefix, 'bin') + dirs = kwds.setdefault('library_dirs', []) + if libdir not in dirs: + dirs.append(libdir) pythonlib = "pypy-c" else: if sys.platform == "win32": @@ -557,9 +563,9 @@ (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) if hasattr(sys, 'abiflags'): pythonlib += sys.abiflags - libraries = kwds.get('libraries', []) + libraries = kwds.setdefault('libraries', []) if pythonlib not in libraries: - kwds['libraries'] = libraries + [pythonlib] + libraries.append(pythonlib) def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -21,7 +21,7 @@ ffi._apply_embedding_fix(kwds) ffi.set_source("_test_lib_python_found", "", **kwds) try: - ffi.compile(tmpdir=tmpdir) + ffi.compile(tmpdir=tmpdir, verbose=True) except cffi.VerificationError as e: _link_error = e else: From pypy.commits at gmail.com Fri Jan 15 11:33:58 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 08:33:58 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: merge heads Message-ID: <56991f76.03231c0a.b3089.30aa@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81810:4573047cc25f Date: 2016-01-15 16:36 +0000 http://bitbucket.org/pypy/pypy/changeset/4573047cc25f/ Log: merge heads diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -10,6 +10,15 @@ with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in recent versions of PyPy. +.. note:: + + The interface described in this page is kept for backward compatibility. + From PyPy 4.1, it is recommended to use instead CFFI's `native embedding + support,`__ which gives a simpler approach that works on CPython as well + as PyPy. + +.. __: http://cffi.readthedocs.org/en/latest/embedding.html + The resulting shared library exports very few functions, however they are enough to accomplish everything you need, provided you follow a few principles. The API is: From pypy.commits at gmail.com Fri Jan 15 11:41:03 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 08:41:03 -0800 (PST) Subject: [pypy-commit] pypy default: one missed "strategy" field read Message-ID: <5699211f.84e31c0a.70bdc.3172@mx.google.com> Author: Armin Rigo Branch: Changeset: r81811:a3d2fb1c4ae0 Date: 2016-01-15 17:40 +0100 http://bitbucket.org/pypy/pypy/changeset/a3d2fb1c4ae0/ Log: one missed "strategy" field read diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1084,7 +1084,7 @@ def is_strdict(space, w_class): from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) + return space.wrap(isinstance(w_d.get_strategy(), BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) From pypy.commits at gmail.com Fri Jan 15 11:52:53 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 08:52:53 -0800 (PST) Subject: [pypy-commit] pypy default: Fix two tests in kwargsdict that have a typo Message-ID: <569923e5.4c0c1c0a.ce6c5.378a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81812:c2ea9594b23c Date: 2016-01-15 17:47 +0100 http://bitbucket.org/pypy/pypy/changeset/c2ea9594b23c/ Log: Fix two tests in kwargsdict that have a typo diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -92,12 +92,12 @@ values = [1, 2, 3] storage = strategy.erase((keys, values)) d = W_DictObject(space, strategy, storage) - assert (space.view_as_kwargs(d) == keys, values) + assert space.view_as_kwargs(d) == (keys, values) strategy = EmptyDictStrategy(space) storage = strategy.get_empty_storage() d = W_DictObject(space, strategy, storage) - assert (space.view_as_kwargs(d) == [], []) + assert space.view_as_kwargs(d) == ([], []) def test_from_empty_to_kwargs(): strategy = EmptyKwargsDictStrategy(space) diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py --- a/pypy/objspace/std/test/test_tupleobject.py +++ b/pypy/objspace/std/test/test_tupleobject.py @@ -237,8 +237,8 @@ class AppTestW_TupleObject: def test_is_true(self): assert not () - assert (5,) - assert (5, 3) + assert bool((5,)) + assert bool((5, 3)) def test_len(self): assert len(()) == 0 From pypy.commits at gmail.com Fri Jan 15 11:52:54 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 08:52:54 -0800 (PST) Subject: [pypy-commit] pypy default: Add slots here, to make sure that nobody is using "self.storage" any longer Message-ID: <569923e6.552f1c0a.1091.3572@mx.google.com> Author: Armin Rigo Branch: Changeset: r81813:47796d1c4e17 Date: 2016-01-15 17:48 +0100 http://bitbucket.org/pypy/pypy/changeset/47796d1c4e17/ Log: Add slots here, to make sure that nobody is using "self.storage" any longer diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -43,6 +43,8 @@ class W_DictMultiObject(W_Root): """ Abstract base class that does not store a strategy. """ + __slots__ = ['space', 'dstorage'] + def get_strategy(self): raise NotImplementedError("abstract method") @@ -351,6 +353,8 @@ class W_DictObject(W_DictMultiObject): """ a regular dict object """ + __slots__ = ['dstrategy'] + def __init__(self, space, strategy, storage): W_DictMultiObject.__init__(self, space, storage) self.dstrategy = strategy @@ -365,6 +369,7 @@ class W_ModuleDictObject(W_DictMultiObject): """ a dict object for a module, that is not expected to change. It stores the strategy as a quasi-immutable field. """ + __slots__ = ['mstrategy'] _immutable_fields_ = ['mstrategy?'] def __init__(self, space, strategy, storage): From pypy.commits at gmail.com Fri Jan 15 13:40:03 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 15 Jan 2016 10:40:03 -0800 (PST) Subject: [pypy-commit] pypy typed-cells: slightly different approach for integrating mutable cells and immutability: Message-ID: <56993d03.ea5ec20a.4bd55.73bb@mx.google.com> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r81814:d4fbc7fa07b0 Date: 2016-01-15 19:39 +0100 http://bitbucket.org/pypy/pypy/changeset/d4fbc7fa07b0/ Log: slightly different approach for integrating mutable cells and immutability: before, an integer field went from storing a W_IntObject immutably, then switching to a IntMutableCell on the first mutation. This makes everything less type stable, leading to more bridges and making this branch fundamentally incompatible with heap profiling. Now the IntMutableCell is *always* stored, but if it is ever mutated map.ever_mutated is set to True. That means that as long as ever_mutated is False, it's safe to fold the read from the IntMutableCell away as well, in an @elidable method. diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -36,19 +36,15 @@ if attr is None: return self.terminator._read_terminator(obj, selector) if ( - jit.isconstant(attr.storageindex) and + jit.isconstant(attr) and jit.isconstant(obj) and not attr.ever_mutated ): - result = self._pure_mapdict_read_storage(obj, attr.storageindex) + result = attr._pure_read(obj) else: result = obj._mapdict_read_storage(attr.storageindex) return attr._read_cell(result) - @jit.elidable - def _pure_mapdict_read_storage(self, obj, storageindex): - return obj._mapdict_read_storage(storageindex) - def write(self, obj, selector, w_value): attr = self.find_map_attr(selector) if attr is None: @@ -176,6 +172,8 @@ # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses obj._set_mapdict_map(attr) + w_value = attr._write_cell(None, w_value) + assert w_value is not None obj._mapdict_write_storage(attr.storageindex, w_value) def materialize_r_dict(self, space, obj, dict_w): @@ -298,6 +296,13 @@ # if the flag is False, we don't need to unbox the attribute. self.can_contain_mutable_cell = False + @jit.elidable + def _pure_read(self, obj): + # this is safe even if the mapdict stores a mutable cell. the cell can + # only be changed is ever_mutated is set to True + result = obj._mapdict_read_storage(self.storageindex) + return self._read_cell(result) + def _read_cell(self, w_cell): if not self.can_contain_mutable_cell: return w_cell @@ -313,16 +318,14 @@ return None check = self._ensure_can_contain_mutable_cell() assert check - if self.ever_mutated: - return IntMutableCell(w_value.intval) + return IntMutableCell(w_value.intval) if type(w_value) is W_FloatObject: if isinstance(w_cell, FloatMutableCell): w_cell.floatvalue = w_value.floatval return None check = self._ensure_can_contain_mutable_cell() assert check - if self.ever_mutated: - return FloatMutableCell(w_value.floatval) + return FloatMutableCell(w_value.floatval) return w_value @jit.elidable diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -108,23 +108,27 @@ assert obj2.map is obj.map def test_attr_immutability(monkeypatch): + from pypy.objspace.std.intobject import W_IntObject cls = Class() obj = cls.instantiate() - obj.setdictvalue(space, "a", 10) - obj.setdictvalue(space, "b", 20) - obj.setdictvalue(space, "b", 30) - assert obj.storage == [10, 30] + obj.setdictvalue(space, "a", W_IntObject(10)) + obj.setdictvalue(space, "b", W_IntObject(20)) + obj.setdictvalue(space, "b", W_IntObject(30)) + mutcella, mutcellb = obj.storage + assert mutcella.intvalue == 10 + assert mutcellb.intvalue == 30 assert obj.map.ever_mutated == True assert obj.map.back.ever_mutated == False indices = [] + orig_pure_read = PlainAttribute._pure_read - def _pure_mapdict_read_storage(obj, storageindex): - assert storageindex == 0 - indices.append(storageindex) - return obj._mapdict_read_storage(storageindex) + def _pure_read(self, obj): + assert self.storageindex == 0 + indices.append(self.storageindex) + return orig_pure_read(self, obj) - obj.map._pure_mapdict_read_storage = _pure_mapdict_read_storage + monkeypatch.setattr(PlainAttribute, "_pure_read", _pure_read) monkeypatch.setattr(jit, "isconstant", lambda c: True) assert obj.getdictvalue(space, "a") == 10 @@ -133,16 +137,20 @@ assert indices == [0, 0] obj2 = cls.instantiate() - obj2.setdictvalue(space, "a", 15) - obj2.setdictvalue(space, "b", 25) + obj2.setdictvalue(space, "a", W_IntObject(15)) + obj2.setdictvalue(space, "b", W_IntObject(25)) + mutcella, mutcellb = obj2.storage assert obj2.map is obj.map assert obj2.map.ever_mutated == True assert obj2.map.back.ever_mutated == False # mutating obj2 changes the map - obj2.setdictvalue(space, "a", 50) + obj2.setdictvalue(space, "a", W_IntObject(50)) assert obj2.map.back.ever_mutated == True assert obj2.map is obj.map + assert obj2.storage[0] is mutcella + assert obj2.storage[1] is mutcellb + def test_attr_immutability_delete(): cls = Class() @@ -154,6 +162,21 @@ assert obj.map.ever_mutated == True assert obj.map is map1 +def test_immutable_with_mutcell(): + # even an immutable attribute will be stored as a mutcell. The reason is + # that then the type of the attribute is more predictable (eg always + # IntMutableCell and sometimes IntMutableCell and sometimes W_IntObject) + from pypy.objspace.std.intobject import W_IntObject + cls = Class() + obj = cls.instantiate() + # make sure the attribute counts as mutable + obj.setdictvalue(space, "a", W_IntObject(4)) + # not wrapped because of the FakeSpace :-( + assert obj.getdictvalue(space, "a") == 4 + mutcell = obj._mapdict_read_storage(0) + assert mutcell.intvalue == 4 + + def test_mutcell_not_immutable(): from pypy.objspace.std.intobject import W_IntObject cls = Class() @@ -211,19 +234,6 @@ assert mutcell2 is mutcell1 -def test_no_mutcell_if_immutable(): - # don't introduce an immutable cell if the attribute seems immutable - from pypy.objspace.std.intobject import W_IntObject - cls = Class() - obj = cls.instantiate() - obj.setdictvalue(space, "a", W_IntObject(5)) - assert not obj.map.ever_mutated - - assert obj.getdictvalue(space, "a").intval == 5 - mutcell = obj._mapdict_read_storage(0) - assert mutcell.intval == 5 - - def test_mutcell_unwrap_only_if_needed(): from pypy.objspace.std.intobject import W_IntObject cls = Class() From pypy.commits at gmail.com Fri Jan 15 13:48:50 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 10:48:50 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: ready to merge Message-ID: <56993f12.8f7e1c0a.d8754.693f@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81815:e52cfa6d7883 Date: 2016-01-15 19:47 +0100 http://bitbucket.org/pypy/pypy/changeset/e52cfa6d7883/ Log: ready to merge From pypy.commits at gmail.com Fri Jan 15 13:48:51 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 10:48:51 -0800 (PST) Subject: [pypy-commit] pypy default: hg merge cffi-static-callback-embedding Message-ID: <56993f13.6adec20a.1460b.7984@mx.google.com> Author: Armin Rigo Branch: Changeset: r81816:6c7267731b67 Date: 2016-01-15 19:47 +0100 http://bitbucket.org/pypy/pypy/changeset/6c7267731b67/ Log: hg merge cffi-static-callback-embedding Add embedding support, bringing CFFI to version 1.5. diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -685,13 +685,17 @@ # the previous version of this code did. This should work for # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - so_ext = _get_c_extension_suffix() + # ".pypy-VERSION.so" instead. Note a further tweak for cffi's + # embedding mode: if EXT_SUFFIX is also defined, use that + # directly. + so_ext = get_config_var('EXT_SUFFIX') if so_ext is None: - so_ext = get_config_var('SO') # fall-back - # extensions in debug_mode are named 'module_d.pyd' under windows - if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back + # extensions in debug_mode are named 'module_d.pyd' under windows + if os.name == 'nt' and self.debug: + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.2 +Version: 1.5.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.5.0" +__version_info__ = (1, 5, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,8 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 #define _cffi_call_python \ - ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -206,7 +207,8 @@ /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org #endif diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -74,6 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -101,13 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed) + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -533,6 +542,31 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + if '__pypy__' in sys.builtin_module_names: + if hasattr(sys, 'prefix'): + import os + libdir = os.path.join(sys.prefix, 'bin') + dirs = kwds.setdefault('library_dirs', []) + if libdir not in dirs: + dirs.append(libdir) + pythonlib = "pypy-c" + else: + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + libraries = kwds.setdefault('libraries', []) + if pythonlib not in libraries: + libraries.append(pythonlib) + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " @@ -592,14 +626,23 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0): + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, + target=target, source_extension=source_extension, compiler_verbose=verbose, **kwds) def init_once(self, func, tag): @@ -626,6 +669,32 @@ self._init_once_cache[tag] = (True, result) return result + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -220,8 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._override = False - self._packed = False + self._options = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -281,16 +280,15 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): - prev_override = self._override - prev_packed = self._packed + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options try: - self._override = override - self._packed = packed + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} self._internal_parse(csource) finally: - self._override = prev_override - self._packed = prev_packed + self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -376,10 +374,13 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._inside_extern_python: - self._declare('extern_python ' + decl.name, tp) + if self._options['dllexport']: + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' else: - self._declare('function ' + decl.name, tp) + tag = 'function ' + self._declare(tag + decl.name, tp) def _parse_decl(self, decl): node = decl.type @@ -449,7 +450,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._override: + if not self._options['override']: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -728,7 +729,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._packed + tp.packed = self._options['packed'] if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, + target_extension, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +38,32 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _save_val(name): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + return config_vars.get(name, Ellipsis) + +def _restore_val(name, value): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + config_vars[name] = value + if value is Ellipsis: + del config_vars[name] + +def _win32_hack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): + MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ + MSVCCompiler._remove_visual_c_ref + MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file + +def _win32_unhack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + MSVCCompiler._remove_visual_c_ref = \ + MSVCCompiler._remove_visual_c_ref_CFFI_BAK + +def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -49,18 +76,29 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: + if sys.platform == 'win32' and embedding: + _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 + old_SO = _save_val('SO') + old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extension is not None: + _restore_val('SO', target_extension) + _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) + _restore_val('SO', old_SO) + _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) + if sys.platform == 'win32' and embedding: + _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -281,6 +282,29 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % + (self._string_literal(self.ffi._embedding),)) + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION + # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() @@ -365,17 +389,16 @@ prnt() # # the init function - base_module_name = self.module_name.split('.')[-1] prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') if self._num_externpy: prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python = ' + prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % VERSION) + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -394,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') @@ -1123,7 +1146,10 @@ assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) - def _generate_cpy_extern_python_decl(self, tp, name): + def _generate_cpy_dllexport_python_collecttype(self, tp, name): + self._generate_cpy_extern_python_collecttype(tp, name) + + def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1156,7 +1182,11 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + if dllexport: + tag = 'CFFI_DLLEXPORT' + else: + tag = 'static' + prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1174,6 +1204,9 @@ prnt() self._num_externpy += 1 + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: raise ffiplatform.VerificationError( @@ -1185,6 +1218,21 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + def _generate_cpy_dllexport_python_ctx(self, tp, name): + self._generate_cpy_extern_python_ctx(tp, name) + + def _string_literal(self, s): + def _char_repr(c): + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' + if c in '\\"?': return '\\' + c + if ' ' <= c < '\x7F': return c + if c == '\n': return '\\n' + return '\\%03o' % ord(c) + lines = [] + for line in s.splitlines(True): + lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) + return ' \\\n'.join(lines) + # ---------- # emitting the opcodes for individual types @@ -1311,12 +1359,15 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, **kwds): + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) @@ -1325,13 +1376,40 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + if target == '*': + target_module_name = module_name + target_extension = None # use default + else: + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + # split along the first '.' (not the last one, otherwise the + # preceeding dots are interpreted as splitting package names) + index = target.find('.') + if index < 0: + raise ValueError("target argument %r should be a file name " + "containing a '.'" % (target,)) + target_module_name = target[:index] + target_extension = target[index:] + # + ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose, + target_extension, + embedding=embedding) finally: os.chdir(cwd) return outputfilename diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -10,6 +10,15 @@ with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in recent versions of PyPy. +.. note:: + + The interface described in this page is kept for backward compatibility. + From PyPy 4.1, it is recommended to use instead CFFI's `native embedding + support,`__ which gives a simpler approach that works on CPython as well + as PyPy. + +.. __: http://cffi.readthedocs.org/en/latest/embedding.html + The resulting shared library exports very few functions, however they are enough to accomplish everything you need, provided you follow a few principles. The API is: diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -84,13 +84,6 @@ from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype - w_pathsetter = space.appexec([], """(): - def f(path): - import sys - sys.path[:] = path - return f - """) - @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -109,7 +102,10 @@ " not found in '%s' or in any parent directory" % home1) return rffi.cast(rffi.INT, 1) space.startup() - space.call_function(w_pathsetter, w_path) + space.appexec([w_path], """(path): + import sys + sys.path[:] = path + """) # import site try: space.setattr(space.getbuiltinmodule('sys'), @@ -149,6 +145,9 @@ return os_thread.setup_threads(space) os_thread.bootstrapper.acquire(space, None, None) + # XXX this doesn't really work. Don't use os.fork(), and + # if your embedder program uses fork(), don't use any PyPy + # code in the fork rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,8 +1,9 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi +from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rtyper.lltypesystem import rffi -VERSION = "1.4.2" +VERSION = "1.5.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -65,6 +66,10 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def startup(self, space): + from pypy.module._cffi_backend import embedding + embedding.glob.space = space + def get_dict_rtld_constants(): found = {} @@ -78,3 +83,11 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value + + +# write this entrypoint() here, to make sure it is registered early enough + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') +def pypy_init_embedded_cffi_module(version, init_struct): + from pypy.module._cffi_backend import embedding + return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -9,18 +9,18 @@ VERSION_MIN = 0x2601 -VERSION_MAX = 0x26FF +VERSION_MAX = 0x27FF VERSION_EXPORT = 0x0A03 -initfunctype = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) +INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) def load_cffi1_module(space, name, path, initptr): # This is called from pypy.module.cpyext.api.load_extension_module() from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python - initfunc = rffi.cast(initfunctype, initptr) + initfunc = rffi.cast(INITFUNCPTR, initptr) with lltype.scoped_alloc(rffi.VOIDPP.TO, 16, zero=True) as p: p[0] = rffi.cast(rffi.VOIDP, VERSION_EXPORT) p[1] = rffi.cast(rffi.VOIDP, get_ll_cffi_call_python()) @@ -41,7 +41,8 @@ w_name = space.wrap(name) module = Module(space, w_name) - module.setdictvalue(space, '__file__', space.wrap(path)) + if path is not None: + module.setdictvalue(space, '__file__', space.wrap(path)) module.setdictvalue(space, 'ffi', space.wrap(ffi)) module.setdictvalue(space, 'lib', space.wrap(lib)) w_modules_dict = space.sys.get('modules') diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cffi_backend/embedding.py @@ -0,0 +1,146 @@ +import os +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import OperationError, oefmt + +# ____________________________________________________________ + + +EMBED_VERSION_MIN = 0xB011 +EMBED_VERSION_MAX = 0xB0FF + +STDERR = 2 +INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', + ('name', rffi.CCHARP), + ('func', rffi.VOIDP), + ('code', rffi.CCHARP))) + +def load_embedded_cffi_module(space, version, init_struct): + from pypy.module._cffi_backend.cffi1_module import load_cffi1_module + declare_c_function() # translation-time hint only: + # declare _cffi_carefully_make_gil() + # + version = rffi.cast(lltype.Signed, version) + if not (EMBED_VERSION_MIN <= version <= EMBED_VERSION_MAX): + raise oefmt(space.w_ImportError, + "cffi embedded module has got unknown version tag %s", + hex(version)) + # + if space.config.objspace.usemodules.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + # + name = rffi.charp2str(init_struct.name) + load_cffi1_module(space, name, None, init_struct.func) + code = rffi.charp2str(init_struct.code) + compiler = space.createcompiler() + pycode = compiler.compile(code, "" % name, 'exec', 0) + w_globals = space.newdict(module=True) + space.setitem_str(w_globals, "__builtins__", space.wrap(space.builtin)) + pycode.exec_code(space, w_globals, w_globals) + + +class Global: + pass +glob = Global() + +def pypy_init_embedded_cffi_module(version, init_struct): + # called from __init__.py + name = "?" + try: + init_struct = rffi.cast(INITSTRUCTPTR, init_struct) + name = rffi.charp2str(init_struct.name) + # + space = glob.space + must_leave = False + try: + must_leave = space.threadlocals.try_enter_thread(space) + load_embedded_cffi_module(space, version, init_struct) + res = 0 + except OperationError, operr: + operr.write_unraisable(space, "initialization of '%s'" % name, + with_traceback=True) + space.appexec([], r"""(): + import sys + sys.stderr.write('pypy version: %s.%s.%s\n' % + sys.pypy_version_info[:3]) + sys.stderr.write('sys.path: %r\n' % (sys.path,)) + """) + res = -1 + if must_leave: + space.threadlocals.leave_thread(space) + except Exception, e: + # oups! last-level attempt to recover. + try: + os.write(STDERR, "From initialization of '") + os.write(STDERR, name) + os.write(STDERR, "':\n") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + res = -1 + return rffi.cast(rffi.INT, res) + +# ____________________________________________________________ + + +eci = ExternalCompilationInfo(separate_module_sources=[ +r""" +/* XXX Windows missing */ +#include +#include +#include + +RPY_EXPORTED void rpython_startup_code(void); +RPY_EXPORTED int pypy_setup_home(char *, int); + +static unsigned char _cffi_ready = 0; +static const char *volatile _cffi_module_name; + +static void _cffi_init_error(const char *msg, const char *extra) +{ + fprintf(stderr, + "\nPyPy initialization failure when loading module '%s':\n%s%s\n", + _cffi_module_name, msg, extra); +} + +static void _cffi_init(void) +{ + Dl_info info; + char *home; + + rpython_startup_code(); + RPyGilAllocate(); + + if (dladdr(&_cffi_init, &info) == 0) { + _cffi_init_error("dladdr() failed: ", dlerror()); + return; + } + home = realpath(info.dli_fname, NULL); + if (pypy_setup_home(home, 1) != 0) { + _cffi_init_error("pypy_setup_home() failed", ""); + return; + } + _cffi_ready = 1; +} + +RPY_EXPORTED +int pypy_carefully_make_gil(const char *name) +{ + /* For CFFI: this initializes the GIL and loads the home path. + It can be called completely concurrently from unrelated threads. + It assumes that we don't hold the GIL before (if it exists), and we + don't hold it afterwards. + */ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + _cffi_module_name = name; /* not really thread-safe, but better than + nothing */ + pthread_once(&once_control, _cffi_init); + return (int)_cffi_ready - 1; +} +"""]) + +declare_c_function = rffi.llexternal_use_eci(eci) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.5.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 5): - py.test.skip("re-enable me in version 1.5") + if __version_info__ < (1, 6): + py.test.skip("re-enable me in version 1.6") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py @@ -54,3 +54,10 @@ content = open(p).read() #v = BACKEND_VERSIONS.get(v, v) assert (('assert __version__ == "%s"' % v) in content) + +def test_embedding_h(): + parent = os.path.dirname(os.path.dirname(cffi.__file__)) + v = cffi.__version__ + p = os.path.join(parent, 'cffi', '_embedding.h') + content = open(p).read() + assert ('cffi version: %s"' % (v,)) in content diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py @@ -1719,3 +1719,10 @@ exec("from _test_import_from_lib.lib import *", d) assert (set(key for key in d if not key.startswith('_')) == set(['myfunc', 'MYFOO'])) + # + # also test "import *" on the module itself, which should be + # equivalent to "import ffi, lib" + d = {} + exec("from _test_import_from_lib import *", d) + assert (sorted([x for x in d.keys() if not x.startswith('__')]) == + ['ffi', 'lib']) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py @@ -60,11 +60,16 @@ if (name.endswith('.so') or name.endswith('.pyd') or name.endswith('.dylib')): found_so = os.path.join(curdir, name) - # foo.cpython-34m.so => foo - name = name.split('.')[0] - # foo_d.so => foo (Python 2 debug builds) + # foo.so => foo + parts = name.split('.') + del parts[-1] + if len(parts) > 1 and parts[-1] != 'bar': + # foo.cpython-34m.so => foo, but foo.bar.so => foo.bar + del parts[-1] + name = '.'.join(parts) + # foo_d => foo (Python 2 debug builds) if name.endswith('_d') and hasattr(sys, 'gettotalrefcount'): - name = name.rsplit('_', 1)[0] + name = name[:-2] name += '.SO' if name.startswith('pycparser') and name.endswith('.egg'): continue # no clue why this shows up sometimes and not others @@ -209,6 +214,58 @@ 'Release': '?'}}) @chdir_to_tmp + def test_api_compile_explicit_target_1(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.*") + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_2(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target=os.path.join("mod_name_in_package", "foo.bar.*")) + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_3(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.baz") + if sys.platform != 'win32': + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + sofile = os.path.join(str(self.udir), 'foo.bar.baz') + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp def test_api_distutils_extension_1(self): ffi = cffi.FFI() ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py @@ -0,0 +1,1 @@ +# Generated by pypy/tool/import_cffi.py diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c @@ -0,0 +1,13 @@ +#include + +extern int add1(int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add1(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + import sys, time + sys.stdout.write("preparing") + for i in range(3): + sys.stdout.flush() + time.sleep(0.02) + sys.stdout.write(".") + sys.stdout.write("\n") + + from _add1_cffi import ffi + + int(ord("A")) # check that built-ins are there + + @ffi.def_extern() + def add1(x, y): + sys.stdout.write("adding %d and %d\n" % (x, y)) + sys.stdout.flush() + return x + y +""") + +ffi.set_source("_add1_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c @@ -0,0 +1,14 @@ +#include + +extern int add1(int, int); +extern int add2(int, int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add2(100, -5, -20); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py @@ -0,0 +1,30 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add2(int, int, int); +""") + +ffi.embedding_init_code(r""" + import sys + sys.stdout.write("prepADD2\n") + + assert '_add2_cffi' in sys.modules + m = sys.modules['_add2_cffi'] + import _add2_cffi + ffi = _add2_cffi.ffi + + @ffi.def_extern() + def add2(x, y, z): + sys.stdout.write("adding %d and %d and %d\n" % (x, y, z)) + sys.stdout.flush() + return x + y + z +""") + +ffi.set_source("_add2_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py @@ -0,0 +1,25 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add3(int, int, int, int); +""") + +ffi.embedding_init_code(r""" + from _add3_cffi import ffi + import sys + + @ffi.def_extern() + def add3(x, y, z, t): + sys.stdout.write("adding %d, %d, %d, %d\n" % (x, y, z, t)) + sys.stdout.flush() + return x + y + z + t +""") + +ffi.set_source("_add3_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c @@ -0,0 +1,27 @@ +#include + +#ifdef _MSC_VER +# define DLLIMPORT __declspec(dllimport) +#else +# define DLLIMPORT extern +#endif + +DLLIMPORT int add_rec(int, int); +DLLIMPORT int (*my_callback)(int); + +static int some_callback(int x) +{ + printf("some_callback(%d)\n", x); + fflush(stdout); + return add_rec(x, 9); +} + +int main(void) +{ + int x, y; + my_callback = some_callback; + x = add_rec(40, 2); + y = add_rec(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int (*my_callback)(int); + int add_rec(int, int); +""") + +ffi.embedding_init_code(r""" + from _add_recursive_cffi import ffi, lib + import sys + print("preparing REC") + sys.stdout.flush() + + @ffi.def_extern() + def add_rec(x, y): + print("adding %d and %d" % (x, y)) + sys.stdout.flush() + return x + y + + x = lib.my_callback(400) + print('<<< %d >>>' % (x,)) +""") + +ffi.set_source("_add_recursive_cffi", """ +/* use CFFI_DLLEXPORT: on windows, it expands to __declspec(dllexport), + which is needed to export a variable from a dll */ +CFFI_DLLEXPORT int (*my_callback)(int); +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c @@ -0,0 +1,86 @@ +#include +#include +#include +#ifdef PTEST_USE_THREAD +# include +# include +static sem_t done; +#endif + + +extern int add1(int, int); + + +static double time_delta(struct timeval *stop, struct timeval *start) +{ + return (stop->tv_sec - start->tv_sec) + + 1e-6 * (stop->tv_usec - start->tv_usec); +} + +static double measure(void) +{ + long long i, iterations; + int result; + struct timeval start, stop; + double elapsed; + + add1(0, 0); /* prepare off-line */ + + i = 0; + iterations = 1000; + result = gettimeofday(&start, NULL); + assert(result == 0); + + while (1) { + for (; i < iterations; i++) { + add1(((int)i) & 0xaaaaaa, ((int)i) & 0x555555); + } + result = gettimeofday(&stop, NULL); + assert(result == 0); + + elapsed = time_delta(&stop, &start); + assert(elapsed >= 0.0); + if (elapsed > 2.5) + break; + iterations = iterations * 3 / 2; + } + + return elapsed / (double)iterations; +} + +static void *start_routine(void *arg) +{ + double t = measure(); + printf("time per call: %.3g\n", t); + +#ifdef PTEST_USE_THREAD + int status = sem_post(&done); + assert(status == 0); +#endif + + return arg; +} + + +int main(void) +{ +#ifndef PTEST_USE_THREAD + start_routine(0); +#else + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + add1(0, 0); /* this is the main thread */ + + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = sem_wait(&done); + assert(status == 0); + } +#endif + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py @@ -0,0 +1,22 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + from _perf_cffi import ffi + + @ffi.def_extern() + def add1(x, y): + return x + y +""") + +ffi.set_source("_perf_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -0,0 +1,151 @@ +# Generated by pypy/tool/import_cffi.py +import py +import sys, os, re +import shutil, subprocess, time +from pypy.module.test_lib_pypy.cffi_tests.udir import udir +import cffi + +if hasattr(sys, 'gettotalrefcount'): + py.test.skip("tried hard and failed to have these tests run " + "in a debug-mode python") + + +local_dir = os.path.dirname(os.path.abspath(__file__)) +_link_error = '?' + +def check_lib_python_found(tmpdir): + global _link_error + if _link_error == '?': + ffi = cffi.FFI() + kwds = {} + ffi._apply_embedding_fix(kwds) + ffi.set_source("_test_lib_python_found", "", **kwds) + try: + ffi.compile(tmpdir=tmpdir, verbose=True) + except cffi.VerificationError as e: + _link_error = e + else: + _link_error = None + if _link_error: + py.test.skip(str(_link_error)) + + +class EmbeddingTests: + _compiled_modules = {} + + def setup_method(self, meth): + check_lib_python_found(str(udir.ensure('embedding', dir=1))) + self._path = udir.join('embedding', meth.__name__) + if sys.platform == "win32": + self._compiled_modules.clear() # workaround + + def get_path(self): + return str(self._path.ensure(dir=1)) + + def _run(self, args, env=None): + print(args) + popen = subprocess.Popen(args, env=env, cwd=self.get_path(), + stdout=subprocess.PIPE, + universal_newlines=True) + output = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("popen failed with exit code %r: %r" % ( + err, args)) + print(output.rstrip()) + return output + + def prepare_module(self, name): + if name not in self._compiled_modules: + path = self.get_path() + filename = '%s.py' % name + # NOTE: if you have an .egg globally installed with an older + # version of cffi, this will not work, because sys.path ends + # up with the .egg before the PYTHONPATH entries. I didn't + # find a solution to that: we could hack sys.path inside the + # script run here, but we can't hack it in the same way in + # execute(). + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + output = self._run([sys.executable, os.path.join(local_dir, filename)], + env=env) + match = re.compile(r"\bFILENAME: (.+)").search(output) + assert match + dynamic_lib_name = match.group(1) + if sys.platform == 'win32': + assert dynamic_lib_name.endswith('_cffi.dll') + else: + assert dynamic_lib_name.endswith('_cffi.so') + self._compiled_modules[name] = dynamic_lib_name + return self._compiled_modules[name] + + def compile(self, name, modules, opt=False, threads=False, defines={}): + path = self.get_path() + filename = '%s.c' % name + shutil.copy(os.path.join(local_dir, filename), path) + shutil.copy(os.path.join(local_dir, 'thread-test.h'), path) + import distutils.ccompiler + curdir = os.getcwd() + try: + os.chdir(self.get_path()) + c = distutils.ccompiler.new_compiler() + print('compiling %s with %r' % (name, modules)) + extra_preargs = [] + if sys.platform == 'win32': + libfiles = [] + for m in modules: + m = os.path.basename(m) + assert m.endswith('.dll') + libfiles.append('Release\\%s.lib' % m[:-4]) + modules = libfiles + elif threads: + extra_preargs.append('-pthread') + objects = c.compile([filename], macros=sorted(defines.items()), debug=True) + c.link_executable(objects + modules, name, extra_preargs=extra_preargs) + finally: + os.chdir(curdir) + + def execute(self, name): + path = self.get_path() + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + libpath = env.get('LD_LIBRARY_PATH') + if libpath: + libpath = path + ':' + libpath + else: + libpath = path + env['LD_LIBRARY_PATH'] = libpath + print('running %r in %r' % (name, path)) + executable_name = name + if sys.platform == 'win32': + executable_name = os.path.join(path, executable_name + '.exe') + popen = subprocess.Popen([executable_name], cwd=path, env=env, + stdout=subprocess.PIPE, + universal_newlines=True) + result = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("%r failed with exit code %r" % (name, err)) + return result + + +class TestBasic(EmbeddingTests): + def test_basic(self): + add1_cffi = self.prepare_module('add1') + self.compile('add1-test', [add1_cffi]) + output = self.execute('add1-test') + assert output == ("preparing...\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") + + def test_two_modules(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('add2-test', [add1_cffi, add2_cffi]) + output = self.execute('add2-test') + assert output == ("preparing...\n" + "adding 40 and 2\n" + "prepADD2\n" + "adding 100 and -5 and -20\n" + "got: 42 75\n") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py @@ -0,0 +1,53 @@ +# Generated by pypy/tool/import_cffi.py +import sys +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + +if sys.platform == 'win32': + import py + py.test.skip("written with POSIX functions") + + +class TestPerformance(EmbeddingTests): + def test_perf_single_threaded(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_1_thread(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '1'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_2_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '2'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_4_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '4'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_8_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '8'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py @@ -0,0 +1,16 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class TestRecursive(EmbeddingTests): + def test_recursive(self): + add_recursive_cffi = self.prepare_module('add_recursive') + self.compile('add_recursive-test', [add_recursive_cffi]) + output = self.execute('add_recursive-test') + assert output == ("preparing REC\n" + "some_callback(400)\n" + "adding 400 and 9\n" + "<<< 409 >>>\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py @@ -0,0 +1,62 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class TestThread(EmbeddingTests): + def test_first_calls_in_parallel(self): + add1_cffi = self.prepare_module('add1') + self.compile('thread1-test', [add1_cffi], threads=True) + for i in range(50): + output = self.execute('thread1-test') + assert output == ("starting\n" + "preparing...\n" + + "adding 40 and 2\n" * 10 + + "done\n") + + def _take_out(self, text, content): + assert content in text + i = text.index(content) + return text[:i] + text[i+len(content):] + + def test_init_different_modules_in_different_threads(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('thread2-test', [add1_cffi, add2_cffi], threads=True) + output = self.execute('thread2-test') + output = self._take_out(output, "preparing") + output = self._take_out(output, ".") + output = self._take_out(output, ".") + # at least the 3rd dot should be after everything from ADD2 + assert output == ("starting\n" + "prepADD2\n" + "adding 1000 and 200 and 30\n" + ".\n" + "adding 40 and 2\n" + "done\n") + + def test_alt_issue(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('thread2-test', [add1_cffi, add2_cffi], + threads=True, defines={'T2TEST_AGAIN_ADD1': '1'}) + output = self.execute('thread2-test') + output = self._take_out(output, "adding 40 and 2\n") + assert output == ("starting\n" + "preparing...\n" + "adding -1 and -1\n" + "prepADD2\n" + "adding 1000 and 200 and 30\n" + "done\n") + + def test_load_in_parallel_more(self): + add2_cffi = self.prepare_module('add2') + add3_cffi = self.prepare_module('add3') + self.compile('thread3-test', [add2_cffi, add3_cffi], threads=True) + for i in range(150): + output = self.execute('thread3-test') + for j in range(10): + output = self._take_out(output, "adding 40 and 2 and 100\n") + output = self._take_out(output, "adding 1000, 200, 30, 4\n") + assert output == ("starting\n" + "prepADD2\n" + "done\n") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py @@ -0,0 +1,11 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class TestThreadLocal(EmbeddingTests): + def test_thread_local(self): + tlocal_cffi = self.prepare_module('tlocal') + self.compile('tlocal-test', [tlocal_cffi], threads=True) + for i in range(10): + output = self.execute('tlocal-test') + assert output == "done\n" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h @@ -0,0 +1,62 @@ +/************************************************************/ +#ifndef _MSC_VER +/************************************************************/ + + +#include +#include + + +/************************************************************/ +#else +/************************************************************/ + + +/* Very quick and dirty, just what I need for these tests. + Don't use directly in any real code! +*/ + +#include +#include + +typedef HANDLE sem_t; +typedef HANDLE pthread_t; + +int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + assert(value == 0); + *sem = CreateSemaphore(NULL, 0, 999, NULL); + return *sem ? 0 : -1; +} + +int sem_post(sem_t *sem) +{ + return ReleaseSemaphore(*sem, 1, NULL) ? 0 : -1; +} + +int sem_wait(sem_t *sem) +{ + WaitForSingleObject(*sem, INFINITE); + return 0; +} + +DWORD WINAPI myThreadProc(LPVOID lpParameter) +{ + void *(* start_routine)(void *) = (void *(*)(void *))lpParameter; + start_routine(NULL); + return 0; +} + +int pthread_create(pthread_t *thread, void *attr, + void *start_routine(void *), void *arg) +{ + assert(arg == NULL); + *thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL); + return *thread ? 0 : -1; +} + + +/************************************************************/ +#endif +/************************************************************/ diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c @@ -0,0 +1,43 @@ +#include +#include +#include "thread-test.h" + +#define NTHREADS 10 + + +extern int add1(int, int); + +static sem_t done; + + +static void *start_routine(void *arg) +{ + int x, status; + x = add1(40, 2); + assert(x == 42); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + fflush(stdout); + for (i = 0; i < NTHREADS; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < NTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c @@ -0,0 +1,57 @@ +#include +#include +#include "thread-test.h" + +extern int add1(int, int); +extern int add2(int, int, int); + +static sem_t done; + + +static void *start_routine_1(void *arg) +{ + int x, status; + x = add1(40, 2); + assert(x == 42); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +static void *start_routine_2(void *arg) +{ + int x, status; +#ifdef T2TEST_AGAIN_ADD1 + add1(-1, -1); +#endif + x = add2(1000, 200, 30); + assert(x == 1230); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + fflush(stdout); + status = pthread_create(&th, NULL, start_routine_1, NULL); + assert(status == 0); + status = pthread_create(&th, NULL, start_routine_2, NULL); + assert(status == 0); + + for (i = 0; i < 2; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c @@ -0,0 +1,55 @@ +#include +#include +#include "thread-test.h" + +extern int add2(int, int, int); +extern int add3(int, int, int, int); + +static sem_t done; + + +static void *start_routine_2(void *arg) +{ + int x, status; + x = add2(40, 2, 100); + assert(x == 142); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +static void *start_routine_3(void *arg) +{ + int x, status; + x = add3(1000, 200, 30, 4); + assert(x == 1234); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + fflush(stdout); + for (i = 0; i < 10; i++) { + status = pthread_create(&th, NULL, start_routine_2, NULL); + assert(status == 0); + status = pthread_create(&th, NULL, start_routine_3, NULL); + assert(status == 0); + } + for (i = 0; i < 20; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c @@ -0,0 +1,47 @@ +#include +#include +#include "thread-test.h" + +#define NTHREADS 10 + + +extern int add1(int, int); + +static sem_t done; + + +static void *start_routine(void *arg) +{ + int i, x, expected, status; + + expected = add1(40, 2); + assert((expected % 1000) == 42); + + for (i=0; i<10; i++) { + x = add1(50, i); + assert(x == expected + 8 + i); + } + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + for (i = 0; i < NTHREADS; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < NTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + from _tlocal_cffi import ffi + import itertools + try: + import thread + g_seen = itertools.count().next + except ImportError: + import _thread as thread # py3 + g_seen = itertools.count().__next__ + tloc = thread._local() + + @ffi.def_extern() + def add1(x, y): + try: + num = tloc.num + except AttributeError: + num = tloc.num = g_seen() * 1000 + return x + y + num +""") + +ffi.set_source("_tlocal_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) From pypy.commits at gmail.com Fri Jan 15 13:53:26 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 10:53:26 -0800 (PST) Subject: [pypy-commit] pypy default: update whatsnew-head.rst Message-ID: <56994026.05bd1c0a.92c61.6646@mx.google.com> Author: Armin Rigo Branch: Changeset: r81817:d09d5a7e36c5 Date: 2016-01-15 19:52 +0100 http://bitbucket.org/pypy/pypy/changeset/d09d5a7e36c5/ Log: update whatsnew-head.rst diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -114,3 +114,8 @@ .. branch: globals-quasiimmut Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. From pypy.commits at gmail.com Fri Jan 15 13:59:41 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 15 Jan 2016 10:59:41 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Extract function Message-ID: <5699419d.85b01c0a.167c1.6e38@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81818:0a5616f5ef3d Date: 2016-01-15 17:50 +0000 http://bitbucket.org/pypy/pypy/changeset/0a5616f5ef3d/ Log: Extract function diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -144,7 +144,16 @@ # pure external function - fall back to the annotations # corresponding to the ll types args_s, s_result = sig_ll(fnobj) + execute = make_sandbox_trampoline(fnname, args_s, s_result) + return _annotate(rtyper, execute, args_s, s_result) +def make_sandbox_trampoline(fnname, args_s, s_result): + """Create a trampoline function with the specified signature. + + The trampoline is meant to be used in place of real calls to the external + function named 'fnname'. It marshals its input arguments, dumps them to + STDOUT, and waits for an answer on STDIN. + """ try: dump_arguments = rmarshal.get_marshaller(tuple(args_s)) load_result = rmarshal.get_loader(s_result) @@ -164,7 +173,8 @@ loader.check_finished() return result execute.__name__ = 'sandboxed_%s' % (fnname,) - return _annotate(rtyper, execute, args_s, s_result) + return execute + def _annotate(rtyper, f, args_s, s_result): ann = MixLevelHelperAnnotator(rtyper) From pypy.commits at gmail.com Fri Jan 15 13:59:42 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 15 Jan 2016 10:59:42 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Do sandboxing of os.* replacements at the rtyper level. Message-ID: <5699419e.0c2e1c0a.b5143.67c8@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81819:9091a198ba62 Date: 2016-01-15 18:58 +0000 http://bitbucket.org/pypy/pypy/changeset/9091a198ba62/ Log: Do sandboxing of os.* replacements at the rtyper level. Putting this in getcallable() is hackish but it's the earliest reasonable place where the function is correctly annotated. diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -29,6 +29,7 @@ from rpython.rtyper.rclass import RootClassRepr from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block +from rpython.translator.sandbox.rsandbox import make_sandbox_trampoline, _annotate class RPythonTyper(object): @@ -561,6 +562,17 @@ def getcallable(self, graph): def getconcretetype(v): return self.bindingrepr(v).lowleveltype + if self.annotator.translator.config.translation.sandbox: + try: + name = graph.func._sandbox_external_name + except AttributeError: + pass + else: + args_s = [v.annotation for v in graph.getargs()] + s_result = graph.getreturnvar().annotation + sandboxed = make_sandbox_trampoline(name, args_s, s_result) + return self.getannmixlevel().delayedfunction( + sandboxed, args_s, s_result) return getfunctionptr(graph, getconcretetype) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -926,9 +926,6 @@ def need_sandboxing(fnobj): if hasattr(fnobj, '_safe_not_sandboxed'): return not fnobj._safe_not_sandboxed - elif getattr(getattr(fnobj, '_callable', None), - '_sandbox_external_name', None): - return True else: return "if_external" diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -131,11 +131,7 @@ trampoline marshals its input arguments, dumps them to STDOUT, and waits for an answer on STDIN. """ - if getattr(getattr(fnobj, '_callable', None), - '_sandbox_external_name', None): - fnname = fnobj._callable._sandbox_external_name - else: - fnname = fnobj._name + fnname = fnobj._name if hasattr(fnobj, 'graph'): graph = fnobj.graph args_s = [v.annotation for v in graph.getargs()] From pypy.commits at gmail.com Fri Jan 15 14:03:18 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 11:03:18 -0800 (PST) Subject: [pypy-commit] pypy default: Simplify a bit the code here Message-ID: <56994276.c8b3c20a.e79c4.7537@mx.google.com> Author: Armin Rigo Branch: Changeset: r81821:e430d927cd28 Date: 2016-01-15 20:02 +0100 http://bitbucket.org/pypy/pypy/changeset/e430d927cd28/ Log: Simplify a bit the code here diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -414,14 +414,18 @@ @jit.elidable def _toint_helper(self): x = self._touint_helper() - # Haven't lost any bits, but if the sign bit is set we're in - # trouble *unless* this is the min negative number. So, - # trouble iff sign bit set && (positive || some bit set other - # than the sign bit). - sign = self.sign - if intmask(x) < 0 and (sign > 0 or (x << 1) != 0): - raise OverflowError - return intmask(intmask(x) * sign) + # Haven't lost any bits so far + if self.sign >= 0: + res = intmask(x) + if res < 0: + raise OverflowError + else: + # Use "-" on the unsigned number, not on the signed number. + # This is needed to produce valid C code. + res = intmask(-x) + if res >= 0: + raise OverflowError + return res @jit.elidable def tolonglong(self): From pypy.commits at gmail.com Fri Jan 15 14:20:24 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 15 Jan 2016 11:20:24 -0800 (PST) Subject: [pypy-commit] cffi default: fix Message-ID: <56994678.c9bfc20a.26c68.7caa@mx.google.com> Author: Armin Rigo Branch: Changeset: r2591:2bb743b8b97e Date: 2016-01-15 20:20 +0100 http://bitbucket.org/cffi/cffi/changeset/2bb743b8b97e/ Log: fix diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -194,15 +194,15 @@ give to users of your DLL. That's why the example above does this:: with open('foo.h') as f: - ffi.embedding(f.read()) + ffi.embedding_api(f.read()) - Note that a drawback of this approach is that ``ffi.embedding()`` + Note that a drawback of this approach is that ``ffi.embedding_api()`` doesn't support ``#ifdef`` directives. You may have to use a more convoluted expression like:: with open('foo.h') as f: lines = [line for line in f if not line.startswith('#')] - ffi.embedding(''.join(lines)) + ffi.embedding_api(''.join(lines)) As in the example above, you can also use the same ``foo.h`` from ``ffi.set_source()``:: @@ -304,7 +304,7 @@ a DLL-exported C function written in C directly, maybe to handle some cases before calling Python functions. To do that, you must *not* put the function's signature in ``ffi.embedding_api()``. (Note that this -requires more hacks if you use ``ffi.embedding(f.read())``.) You must +requires more hacks if you use ``ffi.embedding_api(f.read())``.) You must only write the custom function definition in ``ffi.set_source()``, and prefix it with the macro CFFI_DLLEXPORT: From pypy.commits at gmail.com Fri Jan 15 14:32:25 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 15 Jan 2016 11:32:25 -0800 (PST) Subject: [pypy-commit] pypy typed-cells: optimization: _pure_read already calls _read_cell, so no need to do it again Message-ID: <56994949.ca56c20a.19ef4.ffff86c8@mx.google.com> Author: Carl Friedrich Bolz Branch: typed-cells Changeset: r81822:dc5f27cbb7d7 Date: 2016-01-15 20:31 +0100 http://bitbucket.org/pypy/pypy/changeset/dc5f27cbb7d7/ Log: optimization: _pure_read already calls _read_cell, so no need to do it again (even though that is safe) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -40,7 +40,7 @@ jit.isconstant(obj) and not attr.ever_mutated ): - result = attr._pure_read(obj) + return attr._pure_read(obj) else: result = obj._mapdict_read_storage(attr.storageindex) return attr._read_cell(result) From pypy.commits at gmail.com Sat Jan 16 05:13:45 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 02:13:45 -0800 (PST) Subject: [pypy-commit] cffi default: Trying a different hack: stop patching SO and EXT_SUFFIX in Message-ID: <569a17d9.6408c20a.ea89b.454d@mx.google.com> Author: Armin Rigo Branch: Changeset: r2592:9be92594ef14 Date: 2016-01-16 11:13 +0100 http://bitbucket.org/cffi/cffi/changeset/9be92594ef14/ Log: Trying a different hack: stop patching SO and EXT_SUFFIX in sysconfigdata, and instead patch directly a method in distutils.command.build_ext. Motivation: Windows, where the previous solution makes it add the wrong 'init...' in exports_symbols diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -21,14 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, - embedding=False): +def compile(tmpdir, ext, compiler_verbose=0): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose, - target_extension, embedding) + outputfilename = _build(tmpdir, ext, compiler_verbose) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -38,32 +36,7 @@ os.environ[key] = value return outputfilename -def _save_val(name): - import distutils.sysconfig - config_vars = distutils.sysconfig.get_config_vars() - return config_vars.get(name, Ellipsis) - -def _restore_val(name, value): - import distutils.sysconfig - config_vars = distutils.sysconfig.get_config_vars() - config_vars[name] = value - if value is Ellipsis: - del config_vars[name] - -def _win32_hack_for_embedding(): - from distutils.msvc9compiler import MSVCCompiler - if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): - MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ - MSVCCompiler._remove_visual_c_ref - MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file - -def _win32_unhack_for_embedding(): - from distutils.msvc9compiler import MSVCCompiler - MSVCCompiler._remove_visual_c_ref = \ - MSVCCompiler._remove_visual_c_ref_CFFI_BAK - -def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, - embedding=False): +def _build(tmpdir, ext, compiler_verbose=0): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -76,25 +49,14 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: - if sys.platform == 'win32' and embedding: - _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 - old_SO = _save_val('SO') - old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: - if target_extension is not None: - _restore_val('SO', target_extension) - _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') cmd_obj = dist.get_command_obj('build_ext') [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) - _restore_val('SO', old_SO) - _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) - if sys.platform == 'win32' and embedding: - _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1357,6 +1357,40 @@ parts[-1] += extension return os.path.join(outputdir, *parts), parts + +# Aaargh. Distutils is not tested at all for the purpose of compiling +# DLLs that are not extension modules. Here are some hacks to work +# around that, in the _patch_for_*() functions... + +def _patch_meth(patchlist, cls, name, new_meth): + patchlist.append((cls, name, getattr(cls, name))) + setattr(cls, name, new_meth) + +def _unpatch_meths(patchlist): + for cls, name, old_meth in reversed(patchlist): + setattr(cls, name, old_meth) + +def _patch_for_embedding_win32(patchlist): + from distutils.msvc9compiler import MSVCCompiler + # we must not remove the manifest when building for embedding! + _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', + lambda self, manifest_file: manifest_file) + +def _patch_for_target(patchlist, target): + from distutils.command.build_ext import build_ext + # if 'target' is different from '*', we need to patch some internal + # method to just return this 'target' value, instead of having it + # built from module_name + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + _patch_meth(patchlist, build_ext, 'get_ext_filename', + lambda self, ext_name: target) + + def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, compiler_verbose=1, target=None, **kwds): @@ -1382,36 +1416,22 @@ target = '%s.*' % module_name else: target = '*' - if target == '*': - target_module_name = module_name - target_extension = None # use default - else: - if target.endswith('.*'): - target = target[:-2] - if sys.platform == 'win32': - target += '.dll' - else: - target += '.so' - # split along the first '.' (not the last one, otherwise the - # preceeding dots are interpreted as splitting package names) - index = target.find('.') - if index < 0: - raise ValueError("target argument %r should be a file name " - "containing a '.'" % (target,)) - target_module_name = target[:index] - target_extension = target[index:] # - ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) + ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: + patchlist = [] cwd = os.getcwd() try: + if embedding and sys.platform == 'win32': + _patch_for_embedding_win32(patchlist) + if target != '*': + _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose, - target_extension, - embedding=embedding) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose) finally: os.chdir(cwd) + _unpatch_meths(patchlist) return outputfilename else: return ext, updated diff --git a/testing/cffi1/test_zdist.py b/testing/cffi1/test_zdist.py --- a/testing/cffi1/test_zdist.py +++ b/testing/cffi1/test_zdist.py @@ -219,23 +219,6 @@ x = ffi.compile(target="foo.bar.*") if sys.platform != 'win32': sofile = self.check_produced_files({ - 'foo.bar.SO': None, - 'mod_name_in_package': {'mymod.c': None, - 'mymod.o': None}}) - assert os.path.isabs(x) and os.path.samefile(x, sofile) - else: - self.check_produced_files({ - 'foo.bar.SO': None, - 'mod_name_in_package': {'mymod.c': None}, - 'Release': '?'}) - - @chdir_to_tmp - def test_api_compile_explicit_target_2(self): - ffi = cffi.FFI() - ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") - x = ffi.compile(target=os.path.join("mod_name_in_package", "foo.bar.*")) - if sys.platform != 'win32': - sofile = self.check_produced_files({ 'mod_name_in_package': {'foo.bar.SO': None, 'mymod.c': None, 'mymod.o': None}}) @@ -253,15 +236,16 @@ x = ffi.compile(target="foo.bar.baz") if sys.platform != 'win32': self.check_produced_files({ - 'foo.bar.baz': None, - 'mod_name_in_package': {'mymod.c': None, + 'mod_name_in_package': {'foo.bar.baz': None, + 'mymod.c': None, 'mymod.o': None}}) - sofile = os.path.join(str(self.udir), 'foo.bar.baz') + sofile = os.path.join(str(self.udir), + 'mod_name_in_package', 'foo.bar.baz') assert os.path.isabs(x) and os.path.samefile(x, sofile) else: self.check_produced_files({ - 'foo.bar.baz': None, - 'mod_name_in_package': {'mymod.c': None}, + 'mod_name_in_package': {'foo.bar.baz': None, + 'mymod.c': None}, 'Release': '?'}) @chdir_to_tmp From pypy.commits at gmail.com Sat Jan 16 05:15:58 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 02:15:58 -0800 (PST) Subject: [pypy-commit] cffi default: win32 Message-ID: <569a185e.ca061c0a.c5eb2.304e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2593:affdd0df7951 Date: 2016-01-16 11:15 +0100 http://bitbucket.org/cffi/cffi/changeset/affdd0df7951/ Log: win32 diff --git a/testing/cffi1/test_zdist.py b/testing/cffi1/test_zdist.py --- a/testing/cffi1/test_zdist.py +++ b/testing/cffi1/test_zdist.py @@ -57,7 +57,7 @@ found_so = None for name in os.listdir(curdir): if (name.endswith('.so') or name.endswith('.pyd') or - name.endswith('.dylib')): + name.endswith('.dylib') or name.endswith('.dll')): found_so = os.path.join(curdir, name) # foo.so => foo parts = name.split('.') From pypy.commits at gmail.com Sat Jan 16 05:29:56 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 02:29:56 -0800 (PST) Subject: [pypy-commit] cffi default: win32: needs /MANIFEST Message-ID: <569a1ba4.c74fc20a.ae108.40c7@mx.google.com> Author: Armin Rigo Branch: Changeset: r2594:b1e049e6357f Date: 2016-01-16 11:29 +0100 http://bitbucket.org/cffi/cffi/changeset/b1e049e6357f/ Log: win32: needs /MANIFEST diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -544,13 +544,15 @@ def _apply_embedding_fix(self, kwds): # must include an argument like "-lpython2.7" for the compiler + def ensure(key, value): + lst = kwds.setdefault(key, []) + if value not in lst: + lst.append(value) + # if '__pypy__' in sys.builtin_module_names: if hasattr(sys, 'prefix'): import os - libdir = os.path.join(sys.prefix, 'bin') - dirs = kwds.setdefault('library_dirs', []) - if libdir not in dirs: - dirs.append(libdir) + ensure('library_dirs', os.path.join(sys.prefix, 'bin')) pythonlib = "pypy-c" else: if sys.platform == "win32": @@ -563,9 +565,9 @@ (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) if hasattr(sys, 'abiflags'): pythonlib += sys.abiflags - libraries = kwds.setdefault('libraries', []) - if pythonlib not in libraries: - libraries.append(pythonlib) + ensure('libraries', pythonlib) + if sys.platform == "win32": + ensure('extra_link_args', '/MANIFEST') def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): From pypy.commits at gmail.com Sat Jan 16 05:38:06 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 02:38:06 -0800 (PST) Subject: [pypy-commit] cffi default: Print all commands executed with all details Message-ID: <569a1d8e.a453c20a.3d6b7.5015@mx.google.com> Author: Armin Rigo Branch: Changeset: r2595:60441c8e67c5 Date: 2016-01-16 11:37 +0100 http://bitbucket.org/cffi/cffi/changeset/60441c8e67c5/ Log: Print all commands executed with all details diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -41,9 +41,14 @@ def get_path(self): return str(self._path.ensure(dir=1)) - def _run(self, args, env=None): - print(args) - popen = subprocess.Popen(args, env=env, cwd=self.get_path(), + def _run_base(self, args, env_extra={}, **kwds): + print('RUNNING:', args, env_extra, kwds) + env = os.environ.copy() + env.update(env_extra) + return subprocess.Popen(args, env=env, **kwds) + + def _run(self, args, env_extra={}): + popen = self._run_base(args, env_extra, cwd=self.get_path(), stdout=subprocess.PIPE, universal_newlines=True) output = popen.stdout.read() @@ -64,10 +69,10 @@ # find a solution to that: we could hack sys.path inside the # script run here, but we can't hack it in the same way in # execute(). - env = os.environ.copy() - env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + env_extra = {'PYTHONPATH': + os.path.dirname(os.path.dirname(local_dir))} output = self._run([sys.executable, os.path.join(local_dir, filename)], - env=env) + env_extra=env_extra) match = re.compile(r"\bFILENAME: (.+)").search(output) assert match dynamic_lib_name = match.group(1) @@ -106,21 +111,21 @@ def execute(self, name): path = self.get_path() - env = os.environ.copy() - env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) - libpath = env.get('LD_LIBRARY_PATH') + env_extra = {} + env_extra['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + libpath = os.environ.get('LD_LIBRARY_PATH') if libpath: libpath = path + ':' + libpath else: libpath = path - env['LD_LIBRARY_PATH'] = libpath + env_extra['LD_LIBRARY_PATH'] = libpath print('running %r in %r' % (name, path)) executable_name = name if sys.platform == 'win32': executable_name = os.path.join(path, executable_name + '.exe') - popen = subprocess.Popen([executable_name], cwd=path, env=env, - stdout=subprocess.PIPE, - universal_newlines=True) + popen = self._run_base([executable_name], env_extra, cwd=path, + stdout=subprocess.PIPE, + universal_newlines=True) result = popen.stdout.read() err = popen.wait() if err: From pypy.commits at gmail.com Sat Jan 16 05:52:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 02:52:34 -0800 (PST) Subject: [pypy-commit] cffi default: fix Message-ID: <569a20f2.41dfc20a.8feba.4ab5@mx.google.com> Author: Armin Rigo Branch: Changeset: r2596:03fe8ca059d1 Date: 2016-01-16 11:52 +0100 http://bitbucket.org/cffi/cffi/changeset/03fe8ca059d1/ Log: fix diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -102,6 +102,7 @@ assert m.endswith('.dll') libfiles.append('Release\\%s.lib' % m[:-4]) modules = libfiles + extra_preargs.append('/MANIFEST') elif threads: extra_preargs.append('-pthread') objects = c.compile([filename], macros=sorted(defines.items()), debug=True) From pypy.commits at gmail.com Sat Jan 16 09:50:11 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 06:50:11 -0800 (PST) Subject: [pypy-commit] cffi default: OS/X fix: clang has got a warning enabled by default here Message-ID: <569a58a3.2815c20a.6c9f0.ffff914d@mx.google.com> Author: Armin Rigo Branch: Changeset: r2597:8a197405b239 Date: 2016-01-16 15:48 +0100 http://bitbucket.org/cffi/cffi/changeset/8a197405b239/ Log: OS/X fix: clang has got a warning enabled by default here diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -91,8 +91,8 @@ assert lib.sin(1.23) == math.sin(1.23) def _Wconversion(cdef, source, **kargs): - if sys.platform == 'win32': - py.test.skip("needs GCC or Clang") + if sys.platform in ('win32', 'darwin'): + py.test.skip("needs GCC") ffi = FFI() ffi.cdef(cdef) py.test.raises(VerificationError, ffi.verify, source, **kargs) From pypy.commits at gmail.com Sat Jan 16 09:53:33 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 06:53:33 -0800 (PST) Subject: [pypy-commit] cffi default: here too Message-ID: <569a596d.482e1c0a.800a9.ffff83df@mx.google.com> Author: Armin Rigo Branch: Changeset: r2598:93b41e913bad Date: 2016-01-16 15:53 +0100 http://bitbucket.org/cffi/cffi/changeset/93b41e913bad/ Log: here too diff --git a/testing/cffi1/test_verify1.py b/testing/cffi1/test_verify1.py --- a/testing/cffi1/test_verify1.py +++ b/testing/cffi1/test_verify1.py @@ -71,8 +71,8 @@ assert lib.sin(1.23) == math.sin(1.23) def _Wconversion(cdef, source, **kargs): - if sys.platform == 'win32': - py.test.skip("needs GCC or Clang") + if sys.platform in ('win32', 'darwin'): + py.test.skip("needs GCC") ffi = FFI() ffi.cdef(cdef) py.test.raises(VerificationError, ffi.verify, source, **kargs) From pypy.commits at gmail.com Sat Jan 16 10:35:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 07:35:34 -0800 (PST) Subject: [pypy-commit] cffi default: OS/X: in-progress, one test now passes Message-ID: <569a6346.ca56c20a.19ef4.ffffa054@mx.google.com> Author: Armin Rigo Branch: Changeset: r2599:c3c29cfd035a Date: 2016-01-16 16:35 +0100 http://bitbucket.org/cffi/cffi/changeset/c3c29cfd035a/ Log: OS/X: in-progress, one test now passes diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -633,7 +633,7 @@ compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. Use a file name ending in '.*' to ask for the system's default extension for dynamic libraries - (.so/.dll). + (.so/.dll/.dylib). The default is '*' when building a non-embedded C API extension, and (module_name + '.*') when building an embedded library. diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1363,18 +1363,34 @@ # around that, in the _patch_for_*() functions... def _patch_meth(patchlist, cls, name, new_meth): - patchlist.append((cls, name, getattr(cls, name))) + old = getattr(cls, name) + patchlist.append((cls, name, old)) setattr(cls, name, new_meth) + return old def _unpatch_meths(patchlist): for cls, name, old_meth in reversed(patchlist): setattr(cls, name, old_meth) -def _patch_for_embedding_win32(patchlist): - from distutils.msvc9compiler import MSVCCompiler - # we must not remove the manifest when building for embedding! - _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', - lambda self, manifest_file: manifest_file) +def _patch_for_embedding(patchlist): + if sys.platform == 'win32': + # we must not remove the manifest when building for embedding! + from distutils.msvc9compiler import MSVCCompiler + _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', + lambda self, manifest_file: manifest_file) + + if sys.platform == 'darwin': + # we must not make a '-bundle', but a '-dynamiclib' instead + from distutils.ccompiler import CCompiler + def my_link_shared_object(self, *args, **kwds): + if '-bundle' in self.linker_so: + self.linker_so = list(self.linker_so) + i = self.linker_so.index('-bundle') + self.linker_so[i] = '-dynamiclib' + return old_link_shared_object(self, *args, **kwds) + old_link_shared_object = _patch_meth(patchlist, CCompiler, + 'link_shared_object', + my_link_shared_object) def _patch_for_target(patchlist, target): from distutils.command.build_ext import build_ext @@ -1385,6 +1401,8 @@ target = target[:-2] if sys.platform == 'win32': target += '.dll' + elif sys.platform == 'darwin': + target += '.dylib' else: target += '.so' _patch_meth(patchlist, build_ext, 'get_ext_filename', @@ -1423,8 +1441,8 @@ patchlist = [] cwd = os.getcwd() try: - if embedding and sys.platform == 'win32': - _patch_for_embedding_win32(patchlist) + if embedding: + _patch_for_embedding(patchlist) if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -4,18 +4,18 @@ .. contents:: -You can use CFFI to generate a ``.so/.dll`` which exports the API of -your choice to any C application that wants to link with this -``.so/.dll``. +You can use CFFI to generate a ``.so/.dll/.dylib`` which exports the +API of your choice to any C application that wants to link with this +``.so/.dll/.dylib``. The general idea is as follows: -* You write and execute a Python script, which produces a ``.so/.dll`` - file with the API of your choice. The script also gives some Python - code to be "frozen" inside the ``.so``. +* You write and execute a Python script, which produces a + ``.so/.dll/.dylib`` file with the API of your choice. The script + also gives some Python code to be "frozen" inside the ``.so``. -* At runtime, the C application loads this ``.so/.dll`` without having - to know that it was produced by Python and CFFI. +* At runtime, the C application loads this ``.so/.dll/.dylib`` without + having to know that it was produced by Python and CFFI. * The first time a C function is called, Python is initialized and the frozen Python code is executed. @@ -73,10 +73,10 @@ ffi.compile(target="plugin-1.5.*", verbose=True) Running the code above produces a *DLL*, i,e, a dynamically-loadable -library. It is a file with the extension ``.dll`` on Windows or -``.so`` on other platforms. As usual, it is produced by generating -some intermediate ``.c`` code and then calling the regular -platform-specific C compiler. +library. It is a file with the extension ``.dll`` on Windows, +``.dylib`` on Mac OS/X, or ``.so`` on other platforms. As usual, it +is produced by generating some intermediate ``.c`` code and then +calling the regular platform-specific C compiler. Here are some details about the methods used above: @@ -143,12 +143,14 @@ * **ffi.compile([target=...] [, verbose=True]):** make the C code and compile it. By default, it produces a file called - ``c_module_name.dll`` or ``c_module_name.so``, but the default can - be changed with the optional ``target`` keyword argument. You can - use ``target="foo.*"`` with a literal ``*`` to ask for a file called - ``foo.dll`` on Windows or ``foo.so`` elsewhere. One reason for - specifying an alternate ``target`` is to include characters not - usually allowed in Python module names, like "``plugin-1.5.*``". + ``c_module_name.dll``, ``c_module_name.dylib`` or + ``c_module_name.so``, but the default can be changed with the + optional ``target`` keyword argument. You can use + ``target="foo.*"`` with a literal ``*`` to ask for a file called + ``foo.dll`` on Windows, ``foo.dylib`` on OS/X and ``foo.so`` + elsewhere. One reason for specifying an alternate ``target`` is to + include characters not usually allowed in Python module names, like + "``plugin-1.5.*``". For more complicated cases, you can call instead ``ffi.emit_c_code("foo.c")`` and compile the resulting ``foo.c`` diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -78,6 +78,8 @@ dynamic_lib_name = match.group(1) if sys.platform == 'win32': assert dynamic_lib_name.endswith('_cffi.dll') + elif sys.platform == 'darwin': + assert dynamic_lib_name.endswith('_cffi.dylib') else: assert dynamic_lib_name.endswith('_cffi.so') self._compiled_modules[name] = dynamic_lib_name @@ -124,6 +126,8 @@ executable_name = name if sys.platform == 'win32': executable_name = os.path.join(path, executable_name + '.exe') + else: + executable_name = os.path.join('.', executable_name) popen = self._run_base([executable_name], env_extra, cwd=path, stdout=subprocess.PIPE, universal_newlines=True) From pypy.commits at gmail.com Sat Jan 16 10:37:13 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 07:37:13 -0800 (PST) Subject: [pypy-commit] cffi default: more tests pass Message-ID: <569a63a9.88d31c0a.3ad3.ffff97b1@mx.google.com> Author: Armin Rigo Branch: Changeset: r2600:bb2fe3d096a1 Date: 2016-01-16 16:37 +0100 http://bitbucket.org/cffi/cffi/changeset/bb2fe3d096a1/ Log: more tests pass diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -35,7 +35,7 @@ def setup_method(self, meth): check_lib_python_found(str(udir.ensure('embedding', dir=1))) self._path = udir.join('embedding', meth.__name__) - if sys.platform == "win32": + if sys.platform == "win32" or sys.platform == "darwin": self._compiled_modules.clear() # workaround def get_path(self): From pypy.commits at gmail.com Sat Jan 16 10:59:15 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 07:59:15 -0800 (PST) Subject: [pypy-commit] cffi default: Use mutex/condition variables instead of semaphores (for os/x) Message-ID: <569a68d3.cf821c0a.68ca1.ffff96bd@mx.google.com> Author: Armin Rigo Branch: Changeset: r2601:08bf0039b46e Date: 2016-01-16 16:59 +0100 http://bitbucket.org/cffi/cffi/changeset/08bf0039b46e/ Log: Use mutex/condition variables instead of semaphores (for os/x) diff --git a/testing/embedding/perf-test.c b/testing/embedding/perf-test.c --- a/testing/embedding/perf-test.c +++ b/testing/embedding/perf-test.c @@ -3,8 +3,9 @@ #include #ifdef PTEST_USE_THREAD # include -# include -static sem_t done; +static pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t cond1 = PTHREAD_COND_INITIALIZER; +static int remaining; #endif @@ -54,8 +55,11 @@ printf("time per call: %.3g\n", t); #ifdef PTEST_USE_THREAD - int status = sem_post(&done); - assert(status == 0); + pthread_mutex_lock(&mutex1); + remaining -= 1; + if (!remaining) + pthread_cond_signal(&cond1); + pthread_mutex_unlock(&mutex1); #endif return arg; @@ -68,19 +72,19 @@ start_routine(0); #else pthread_t th; - int i, status = sem_init(&done, 0, 0); - assert(status == 0); + int i, status; add1(0, 0); /* this is the main thread */ + remaining = PTEST_USE_THREAD; for (i = 0; i < PTEST_USE_THREAD; i++) { status = pthread_create(&th, NULL, start_routine, NULL); assert(status == 0); } - for (i = 0; i < PTEST_USE_THREAD; i++) { - status = sem_wait(&done); - assert(status == 0); - } + pthread_mutex_lock(&mutex1); + while (remaining) + pthread_cond_wait(&cond1, &mutex1); + pthread_mutex_unlock(&mutex1); #endif return 0; } diff --git a/testing/embedding/thread-test.h b/testing/embedding/thread-test.h --- a/testing/embedding/thread-test.h +++ b/testing/embedding/thread-test.h @@ -4,7 +4,41 @@ #include -#include + +/* don't include , it is not available on OS/X */ + +typedef struct { + pthread_mutex_t mutex1; + pthread_cond_t cond1; + unsigned int value; +} sem_t; + +static int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + sem->value = value; + return (pthread_mutex_init(&sem->mutex1, NULL) || + pthread_cond_init(&sem->cond1, NULL)); +} + +static int sem_post(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + sem->value += 1; + pthread_cond_signal(&sem->cond1); + pthread_mutex_unlock(&sem->mutex1); + return 0; +} + +static int sem_wait(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + while (sem->value == 0) + pthread_cond_wait(&sem->cond1, &sem->mutex1); + sem->value -= 1; + pthread_mutex_unlock(&sem->mutex1); + return 0; +} /************************************************************/ @@ -22,7 +56,7 @@ typedef HANDLE sem_t; typedef HANDLE pthread_t; -int sem_init(sem_t *sem, int pshared, unsigned int value) +static int sem_init(sem_t *sem, int pshared, unsigned int value) { assert(pshared == 0); assert(value == 0); @@ -30,26 +64,26 @@ return *sem ? 0 : -1; } -int sem_post(sem_t *sem) +static int sem_post(sem_t *sem) { return ReleaseSemaphore(*sem, 1, NULL) ? 0 : -1; } -int sem_wait(sem_t *sem) +static int sem_wait(sem_t *sem) { WaitForSingleObject(*sem, INFINITE); return 0; } -DWORD WINAPI myThreadProc(LPVOID lpParameter) +static DWORD WINAPI myThreadProc(LPVOID lpParameter) { void *(* start_routine)(void *) = (void *(*)(void *))lpParameter; start_routine(NULL); return 0; } -int pthread_create(pthread_t *thread, void *attr, - void *start_routine(void *), void *arg) +static int pthread_create(pthread_t *thread, void *attr, + void *start_routine(void *), void *arg) { assert(arg == NULL); *thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL); From pypy.commits at gmail.com Sat Jan 16 11:29:31 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 08:29:31 -0800 (PST) Subject: [pypy-commit] cffi default: fix when running this test in the same process as ../cffi0/test_verify.py (found on os/x, but I don't know why it doesn't also fail on linux) Message-ID: <569a6feb.878e1c0a.5ceee.ffff9edf@mx.google.com> Author: Armin Rigo Branch: Changeset: r2602:95386637958b Date: 2016-01-16 17:25 +0100 http://bitbucket.org/cffi/cffi/changeset/95386637958b/ Log: fix when running this test in the same process as ../cffi0/test_verify.py (found on os/x, but I don't know why it doesn't also fail on linux) diff --git a/testing/cffi1/test_verify1.py b/testing/cffi1/test_verify1.py --- a/testing/cffi1/test_verify1.py +++ b/testing/cffi1/test_verify1.py @@ -2091,20 +2091,20 @@ old = sys.getdlopenflags() try: ffi1 = FFI() - ffi1.cdef("int foo_verify_dlopen_flags;") + ffi1.cdef("int foo_verify_dlopen_flags_1;") sys.setdlopenflags(ffi1.RTLD_GLOBAL | ffi1.RTLD_NOW) - lib1 = ffi1.verify("int foo_verify_dlopen_flags;") + lib1 = ffi1.verify("int foo_verify_dlopen_flags_1;") finally: sys.setdlopenflags(old) ffi2 = FFI() ffi2.cdef("int *getptr(void);") lib2 = ffi2.verify(""" - extern int foo_verify_dlopen_flags; - static int *getptr(void) { return &foo_verify_dlopen_flags; } + extern int foo_verify_dlopen_flags_1; + static int *getptr(void) { return &foo_verify_dlopen_flags_1; } """) p = lib2.getptr() - assert ffi1.addressof(lib1, 'foo_verify_dlopen_flags') == p + assert ffi1.addressof(lib1, 'foo_verify_dlopen_flags_1') == p def test_consider_not_implemented_function_type(): ffi = FFI() From pypy.commits at gmail.com Sat Jan 16 11:53:00 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 08:53:00 -0800 (PST) Subject: [pypy-commit] pypy default: Link to http://pypy.org/download.html#building-from-source, which Message-ID: <569a756c.aa5dc20a.6c0f2.ffffb50a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81823:6096976956fa Date: 2016-01-16 17:52 +0100 http://bitbucket.org/pypy/pypy/changeset/6096976956fa/ Log: Link to http://pypy.org/download.html#building-from-source, which contains the "hg unbundle" alternative diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -19,7 +19,9 @@ * Clone this new repo (i.e. the fork) to your local machine with the command ``hg clone ssh://hg at bitbucket.org/yourname/pypy``. It is a very slow - operation but only ever needs to be done once. If you already cloned + operation but only ever needs to be done once. See also + http://pypy.org/download.html#building-from-source . + If you already cloned ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, then you can reuse the same clone by editing the file ``.hg/hgrc`` in your clone to contain the line ``default = From pypy.commits at gmail.com Sat Jan 16 11:57:31 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 08:57:31 -0800 (PST) Subject: [pypy-commit] cffi default: hg merge release-1.5 Message-ID: <569a767b.6a69c20a.b6a34.ffffbaf7@mx.google.com> Author: Armin Rigo Branch: Changeset: r2605:1863e9f89956 Date: 2016-01-16 17:57 +0100 http://bitbucket.org/cffi/cffi/changeset/1863e9f89956/ Log: hg merge release-1.5 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.5.0.tar.gz - - MD5: ... + - MD5: dec8441e67880494ee881305059af656 - - SHA: ... + - SHA: fd21011ba2a3cab627001b52c69fd7274517e549 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Sat Jan 16 11:57:28 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 08:57:28 -0800 (PST) Subject: [pypy-commit] cffi release-1.5: make release branch Message-ID: <569a7678.e251c20a.edf1d.ffffb62a@mx.google.com> Author: Armin Rigo Branch: release-1.5 Changeset: r2603:a3ae6135ac09 Date: 2016-01-16 17:46 +0100 http://bitbucket.org/cffi/cffi/changeset/a3ae6135ac09/ Log: make release branch From pypy.commits at gmail.com Sat Jan 16 11:57:30 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 16 Jan 2016 08:57:30 -0800 (PST) Subject: [pypy-commit] cffi release-1.5: md5/sha1 Message-ID: <569a767a.a85fc20a.6917f.ffffbc68@mx.google.com> Author: Armin Rigo Branch: release-1.5 Changeset: r2604:904eb4b94a4e Date: 2016-01-16 17:56 +0100 http://bitbucket.org/cffi/cffi/changeset/904eb4b94a4e/ Log: md5/sha1 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.5.0.tar.gz - - MD5: ... + - MD5: dec8441e67880494ee881305059af656 - - SHA: ... + - SHA: fd21011ba2a3cab627001b52c69fd7274517e549 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Sat Jan 16 15:19:16 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 16 Jan 2016 12:19:16 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: fix ref leak in test Message-ID: <569aa5c4.2467c20a.c3cd8.fffffb2c@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81824:e44851036099 Date: 2016-01-16 19:05 +0200 http://bitbucket.org/pypy/pypy/changeset/e44851036099/ Log: fix ref leak in test diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -695,7 +695,7 @@ _CMPDOC(MemberDescr, new->d_member->doc, new->d_member->name); } else if (_TESTDOC2(GetSetDescr)) { - //_CMPDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); + _CMPDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); } else if (_TESTDOC2(MethodDescr)) { _CMPDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); @@ -706,6 +706,7 @@ doc_attr = PyObject_GetAttrString(obj, "__doc__"); if (doc_attr == NULL || doc_attr == Py_None) { PyErr_Format(PyExc_RuntimeError, "object %s", msg); + Py_XDECREF(doc_attr); return NULL; } From pypy.commits at gmail.com Sat Jan 16 15:19:17 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 16 Jan 2016 12:19:17 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: add make_typedescr for W_GetSetPropertyEx <-> PyGetSetDescrObject conversion Message-ID: <569aa5c5.e16ec20a.5f2da.0086@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81825:954e2d6bb370 Date: 2016-01-16 22:17 +0200 http://bitbucket.org/pypy/pypy/changeset/954e2d6bb370/ Log: add make_typedescr for W_GetSetPropertyEx <-> PyGetSetDescrObject conversion diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -59,9 +59,8 @@ obj = getattr(module.fooType, m) docstring = obj.__doc__ if not docstring: - raises(RuntimeError, module.cmp_docstring, obj, 'random') + raises(RuntimeError, module.cmp_docstring, obj, 'xxxrandomxxx') else: - import pdb;pdb.set_trace() module.cmp_docstring(obj, docstring) assert str(type(module.fooType.int_member)) == "" diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -117,6 +117,14 @@ cpython_struct("PyMemberDescrObject", PyMemberDescrObjectFields, PyMemberDescrObjectStruct, level=2) +PyGetSetDescrObjectStruct = lltype.ForwardReference() +PyGetSetDescrObject = lltype.Ptr(PyGetSetDescrObjectStruct) +PyGetSetDescrObjectFields = PyDescrObjectFields + ( + ("d_getset", lltype.Ptr(PyGetSetDef)), + ) +cpython_struct("PyGetSetDescrObject", PyGetSetDescrObjectFields, + PyGetSetDescrObjectStruct, level=2) + @bootstrap_function def init_memberdescrobject(space): make_typedescr(W_MemberDescr.typedef, @@ -124,6 +132,10 @@ attach=memberdescr_attach, realize=memberdescr_realize, ) + make_typedescr(W_GetSetPropertyEx.typedef, + basestruct=PyGetSetDescrObject.TO, + attach=getsetdescr_attach, + ) def memberdescr_attach(space, py_obj, w_obj): """ @@ -146,6 +158,16 @@ state.set_lifeline(w_obj, obj) return w_obj +def getsetdescr_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyGetSetDescrObject with the given W_GetSetPropertyEx + object. The values must not be modified. + """ + py_getsetdescr = rffi.cast(PyGetSetDescrObject, py_obj) + # XXX assign to d_dname, d_type? + assert isinstance(w_obj, W_GetSetPropertyEx) + py_getsetdescr.c_d_getset = w_obj.getset + def convert_getset_defs(space, dict_w, getsets, w_type): getsets = rffi.cast(rffi.CArrayPtr(PyGetSetDef), getsets) if getsets: From pypy.commits at gmail.com Sun Jan 17 10:45:04 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 17 Jan 2016 07:45:04 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Do sandboxing of register_external() stuff at the rtyper level Message-ID: <569bb700.84c9c20a.76280.0bd3@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81826:7a2c1c67667b Date: 2016-01-17 15:44 +0000 http://bitbucket.org/pypy/pypy/changeset/7a2c1c67667b/ Log: Do sandboxing of register_external() stuff at the rtyper level diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -46,6 +46,12 @@ impl = getattr(self, 'lltypeimpl', None) fakeimpl = getattr(self, 'lltypefakeimpl', self.instance) if impl: + if (rtyper.annotator.translator.config.translation.sandbox + and not self.safe_not_sandboxed): + from rpython.translator.sandbox.rsandbox import ( + make_sandbox_trampoline) + impl = make_sandbox_trampoline( + self.name, signature_args, s_result) if hasattr(self, 'lltypefakeimpl'): # If we have both an llimpl and an llfakeimpl, # we need a wrapper that selects the proper one and calls it @@ -74,15 +80,10 @@ return original_impl(%s) """ % (args, args, args)) in d impl = func_with_new_name(d['ll_wrapper'], name + '_wrapper') - if rtyper.annotator.translator.config.translation.sandbox: - impl._dont_inline_ = True # store some attributes to the 'impl' function, where # the eventual call to rtyper.getcallable() will find them # and transfer them to the final lltype.functionptr(). - impl._llfnobjattrs_ = { - '_name': self.name, - '_safe_not_sandboxed': self.safe_not_sandboxed, - } + impl._llfnobjattrs_ = {'_name': self.name} obj = rtyper.getannmixlevel().delayedfunction( impl, signature_args, hop.s_result) else: diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -29,7 +29,7 @@ from rpython.rtyper.rclass import RootClassRepr from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block -from rpython.translator.sandbox.rsandbox import make_sandbox_trampoline, _annotate +from rpython.translator.sandbox.rsandbox import make_sandbox_trampoline class RPythonTyper(object): diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -905,13 +905,7 @@ def new_funcnode(db, T, obj, forcename=None): sandbox = db.sandbox and need_sandboxing(obj) if sandbox: - if hasattr(obj, 'graph') and sandbox != 'if_external': - graph = rsandbox.get_external_function_sandbox_graph( - obj, db.translator.rtyper) - obj.__dict__['graph'] = graph - obj.__dict__.pop('_safe_not_sandboxed', None) - obj.__dict__.pop('external', None) - elif getattr(obj, 'external', None) is not None: + if getattr(obj, 'external', None) is not None: obj.__dict__['graph'] = rsandbox.get_sandbox_stub( obj, db.translator.rtyper) obj.__dict__.pop('_safe_not_sandboxed', None) From pypy.commits at gmail.com Sun Jan 17 10:47:50 2016 From: pypy.commits at gmail.com (stefanor) Date: Sun, 17 Jan 2016 07:47:50 -0800 (PST) Subject: [pypy-commit] cffi default: Don't blow away PYTHONPATH Message-ID: <569bb7a6.46bb1c0a.7c11b.0bab@mx.google.com> Author: Stefano Rivera Branch: Changeset: r2607:b1196289cab3 Date: 2016-01-17 07:47 -0800 http://bitbucket.org/cffi/cffi/changeset/b1196289cab3/ Log: Don't blow away PYTHONPATH Rather parse it, and prefix our base directory, if necessary. diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -29,6 +29,14 @@ py.test.skip(str(_link_error)) +def prefix_pythonpath(): + cffi_base = os.path.dirname(os.path.dirname(local_dir)) + pythonpath = os.environ.get('PYTHONPATH', '').split(':') + if cffi_base not in pythonpath: + pythonpath.insert(0, cffi_base) + return ':'.join(pythonpath) + + class EmbeddingTests: _compiled_modules = {} @@ -69,8 +77,7 @@ # find a solution to that: we could hack sys.path inside the # script run here, but we can't hack it in the same way in # execute(). - env_extra = {'PYTHONPATH': - os.path.dirname(os.path.dirname(local_dir))} + env_extra = {'PYTHONPATH': prefix_pythonpath()} output = self._run([sys.executable, os.path.join(local_dir, filename)], env_extra=env_extra) match = re.compile(r"\bFILENAME: (.+)").search(output) @@ -114,8 +121,7 @@ def execute(self, name): path = self.get_path() - env_extra = {} - env_extra['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + env_extra = {'PYTHONPATH': prefix_pythonpath()} libpath = os.environ.get('LD_LIBRARY_PATH') if libpath: libpath = path + ':' + libpath From pypy.commits at gmail.com Sun Jan 17 10:47:48 2016 From: pypy.commits at gmail.com (stefanor) Date: Sun, 17 Jan 2016 07:47:48 -0800 (PST) Subject: [pypy-commit] cffi default: Missing .c and .h files Message-ID: <569bb7a4.c615c20a.9c2f5.181d@mx.google.com> Author: Stefano Rivera Branch: Changeset: r2606:5d2a59af1b06 Date: 2016-01-17 07:46 -0800 http://bitbucket.org/cffi/cffi/changeset/5d2a59af1b06/ Log: Missing .c and .h files Required for the new embedding test suite. diff --git a/MANIFEST.in b/MANIFEST.in --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ recursive-include cffi *.py *.h recursive-include c *.c *.h *.asm *.py win64.obj -recursive-include testing *.py +recursive-include testing *.py *.c *.h recursive-include doc *.py *.rst Makefile *.bat -recursive-include demo py.cleanup *.py manual.c +recursive-include demo py.cleanup *.py embedding_test.c manual.c include AUTHORS LICENSE setup.py setup_base.py From pypy.commits at gmail.com Sun Jan 17 11:00:14 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Jan 2016 08:00:14 -0800 (PST) Subject: [pypy-commit] cffi default: Use os.pathsep instead of directly ':' Message-ID: <569bba8e.8f7e1c0a.d8754.11f9@mx.google.com> Author: Armin Rigo Branch: Changeset: r2608:5e4c30fb3a24 Date: 2016-01-17 17:00 +0100 http://bitbucket.org/cffi/cffi/changeset/5e4c30fb3a24/ Log: Use os.pathsep instead of directly ':' diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -31,10 +31,10 @@ def prefix_pythonpath(): cffi_base = os.path.dirname(os.path.dirname(local_dir)) - pythonpath = os.environ.get('PYTHONPATH', '').split(':') + pythonpath = os.environ.get('PYTHONPATH', '').split(os.pathsep) if cffi_base not in pythonpath: pythonpath.insert(0, cffi_base) - return ':'.join(pythonpath) + return os.pathsep.join(pythonpath) class EmbeddingTests: From pypy.commits at gmail.com Sun Jan 17 13:42:37 2016 From: pypy.commits at gmail.com (stefanor) Date: Sun, 17 Jan 2016 10:42:37 -0800 (PST) Subject: [pypy-commit] cffi default: Allow testing against installed package (which has egg_info already) Message-ID: <569be09d.8a58c20a.921f0.550b@mx.google.com> Author: Stefano Rivera Branch: Changeset: r2609:2eb9c68c1515 Date: 2016-01-17 10:42 -0800 http://bitbucket.org/cffi/cffi/changeset/2eb9c68c1515/ Log: Allow testing against installed package (which has egg_info already) diff --git a/testing/cffi1/test_zdist.py b/testing/cffi1/test_zdist.py --- a/testing/cffi1/test_zdist.py +++ b/testing/cffi1/test_zdist.py @@ -48,7 +48,8 @@ import setuptools except ImportError: py.test.skip("setuptools not found") - self.run(['setup.py', 'egg_info'], cwd=self.rootdir) + if os.path.exists(os.path.join(self.rootdir, 'setup.py')): + self.run(['setup.py', 'egg_info'], cwd=self.rootdir) TestDist._setuptools_ready = True def check_produced_files(self, content, curdir=None): From pypy.commits at gmail.com Sun Jan 17 15:17:30 2016 From: pypy.commits at gmail.com (stefanor) Date: Sun, 17 Jan 2016 12:17:30 -0800 (PST) Subject: [pypy-commit] cffi default: Support extensions for pydebug cpythons Message-ID: <569bf6da.4c301c0a.1e7ab.5f5a@mx.google.com> Author: Stefano Rivera Branch: Changeset: r2610:b848a2fed6c7 Date: 2016-01-17 12:16 -0800 http://bitbucket.org/cffi/cffi/changeset/b848a2fed6c7/ Log: Support extensions for pydebug cpythons diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -565,6 +565,8 @@ (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) if hasattr(sys, 'abiflags'): pythonlib += sys.abiflags + elif hasattr(sys, 'gettotalrefcount'): + pythonlib += '_d' ensure('libraries', pythonlib) if sys.platform == "win32": ensure('extra_link_args', '/MANIFEST') diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -4,10 +4,6 @@ from testing.udir import udir import cffi -if hasattr(sys, 'gettotalrefcount'): - py.test.skip("tried hard and failed to have these tests run " - "in a debug-mode python") - local_dir = os.path.dirname(os.path.abspath(__file__)) _link_error = '?' From pypy.commits at gmail.com Sun Jan 17 15:52:32 2016 From: pypy.commits at gmail.com (stefanor) Date: Sun, 17 Jan 2016 12:52:32 -0800 (PST) Subject: [pypy-commit] cffi default: Don't hardcode _d. If that is the extension, it'll be in sysconfig Message-ID: <569bff10.6918c20a.a5769.6e6d@mx.google.com> Author: Stefano Rivera Branch: Changeset: r2611:a3f2aad89ff0 Date: 2016-01-17 12:52 -0800 http://bitbucket.org/cffi/cffi/changeset/a3f2aad89ff0/ Log: Don't hardcode _d. If that is the extension, it'll be in sysconfig diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -1,4 +1,4 @@ -import sys, types +import sys, sysconfig, types from .lock import allocate_lock try: @@ -557,16 +557,14 @@ else: if sys.platform == "win32": template = "python%d%d" - if sys.flags.debug: - template = template + '_d' else: template = "python%d.%d" pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) if hasattr(sys, 'abiflags'): pythonlib += sys.abiflags - elif hasattr(sys, 'gettotalrefcount'): - pythonlib += '_d' + elif sysconfig.get_config_var('DEBUG_EXT'): + pythonlib += sysconfig.get_config_var('DEBUG_EXT') ensure('libraries', pythonlib) if sys.platform == "win32": ensure('extra_link_args', '/MANIFEST') From pypy.commits at gmail.com Sun Jan 17 16:01:47 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 17 Jan 2016 13:01:47 -0800 (PST) Subject: [pypy-commit] cffi default: Windows uses '_d' even though I'm pretty sure DEBUG_EXT is a Debian-only Message-ID: <569c013b.cf821c0a.68ca1.68b0@mx.google.com> Author: Armin Rigo Branch: Changeset: r2612:ab31f1f18faf Date: 2016-01-17 22:01 +0100 http://bitbucket.org/cffi/cffi/changeset/ab31f1f18faf/ Log: Windows uses '_d' even though I'm pretty sure DEBUG_EXT is a Debian- only extension diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -557,14 +557,16 @@ else: if sys.platform == "win32": template = "python%d%d" + if hasattr(sys, 'gettotalrefcount'): + template += '_d' else: template = "python%d.%d" + if sysconfig.get_config_var('DEBUG_EXT'): + template += sysconfig.get_config_var('DEBUG_EXT') pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) if hasattr(sys, 'abiflags'): pythonlib += sys.abiflags - elif sysconfig.get_config_var('DEBUG_EXT'): - pythonlib += sysconfig.get_config_var('DEBUG_EXT') ensure('libraries', pythonlib) if sys.platform == "win32": ensure('extra_link_args', '/MANIFEST') From pypy.commits at gmail.com Mon Jan 18 02:19:47 2016 From: pypy.commits at gmail.com (sbauman) Date: Sun, 17 Jan 2016 23:19:47 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Test case for a large number of immutable field references Message-ID: <569c9213.85e41c0a.93cdb.ffffe454@mx.google.com> Author: Spenser Bauman Branch: remove-getfield-pure Changeset: r81827:5efe5243ae95 Date: 2016-01-18 02:16 -0500 http://bitbucket.org/pypy/pypy/changeset/5efe5243ae95/ Log: Test case for a large number of immutable field references diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -9266,6 +9266,123 @@ a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE3)), 5, zero=True) self.optimize_loop(ops, expected, jump_values=[a]) + def test_large_number_of_immutable_references(self): + ops = """ + [p0] + i0 = getfield_gc_i(p0, descr=bigadescr) + i1 = getfield_gc_i(p0, descr=bigbdescr) + i2 = getfield_gc_i(p0, descr=bigcdescr) + i3 = getfield_gc_i(p0, descr=bigddescr) + i4 = getfield_gc_i(p0, descr=bigedescr) + i5 = getfield_gc_i(p0, descr=bigfdescr) + i6 = getfield_gc_i(p0, descr=biggdescr) + i7 = getfield_gc_i(p0, descr=bighdescr) + i8 = getfield_gc_i(p0, descr=bigidescr) + i9 = getfield_gc_i(p0, descr=bigjdescr) + i10 = getfield_gc_i(p0, descr=bigkdescr) + i11 = getfield_gc_i(p0, descr=bigldescr) + i12 = getfield_gc_i(p0, descr=bigmdescr) + i13 = getfield_gc_i(p0, descr=bigndescr) + i14 = getfield_gc_i(p0, descr=bigodescr) + i15 = getfield_gc_i(p0, descr=bigpdescr) + i16 = getfield_gc_i(p0, descr=bigqdescr) + i17 = getfield_gc_i(p0, descr=bigrdescr) + i18 = getfield_gc_i(p0, descr=bigsdescr) + i19 = getfield_gc_i(p0, descr=bigtdescr) + i20 = getfield_gc_i(p0, descr=bigudescr) + i21 = getfield_gc_i(p0, descr=bigvdescr) + i22 = getfield_gc_i(p0, descr=bigwdescr) + i23 = getfield_gc_i(p0, descr=bigxdescr) + i24 = getfield_gc_i(p0, descr=bigydescr) + i25 = getfield_gc_i(p0, descr=bigzdescr) + i27 = getfield_gc_i(p0, descr=bigbdescr) + i28 = getfield_gc_i(p0, descr=bigcdescr) + i29 = getfield_gc_i(p0, descr=bigddescr) + i30 = getfield_gc_i(p0, descr=bigedescr) + i31 = getfield_gc_i(p0, descr=bigfdescr) + i32 = getfield_gc_i(p0, descr=biggdescr) + i33 = getfield_gc_i(p0, descr=bighdescr) + i34 = getfield_gc_i(p0, descr=bigidescr) + i35 = getfield_gc_i(p0, descr=bigjdescr) + i36 = getfield_gc_i(p0, descr=bigkdescr) + i37 = getfield_gc_i(p0, descr=bigldescr) + i38 = getfield_gc_i(p0, descr=bigmdescr) + i39 = getfield_gc_i(p0, descr=bigndescr) + i40 = getfield_gc_i(p0, descr=bigodescr) + i41 = getfield_gc_i(p0, descr=bigpdescr) + i42 = getfield_gc_i(p0, descr=bigqdescr) + i43 = getfield_gc_i(p0, descr=bigrdescr) + i44 = getfield_gc_i(p0, descr=bigsdescr) + i45 = getfield_gc_i(p0, descr=bigtdescr) + i46 = getfield_gc_i(p0, descr=bigudescr) + i47 = getfield_gc_i(p0, descr=bigvdescr) + i48 = getfield_gc_i(p0, descr=bigwdescr) + i49 = getfield_gc_i(p0, descr=bigxdescr) + i50 = getfield_gc_i(p0, descr=bigydescr) + i51 = getfield_gc_i(p0, descr=bigzdescr) + i26 = getfield_gc_i(p0, descr=bigadescr) + i99 = int_add(i26, i51) + escape_i(i27) + escape_i(i28) + escape_i(i29) + escape_i(i30) + escape_i(i31) + escape_i(i32) + escape_i(i33) + escape_i(i34) + escape_i(i35) + escape_i(i36) + escape_i(i37) + escape_i(i38) + escape_i(i39) + escape_i(i40) + escape_i(i41) + escape_i(i42) + escape_i(i43) + escape_i(i44) + escape_i(i45) + escape_i(i46) + escape_i(i47) + escape_i(i48) + escape_i(i49) + escape_i(i50) + escape_i(i51) + escape_i(i26) + escape_i(i99) + jump(p0) + """ + expected = """ + [p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99] + escape_i(i1) + escape_i(i2) + escape_i(i3) + escape_i(i4) + escape_i(i5) + escape_i(i6) + escape_i(i7) + escape_i(i8) + escape_i(i9) + escape_i(i10) + escape_i(i11) + escape_i(i12) + escape_i(i13) + escape_i(i14) + escape_i(i15) + escape_i(i16) + escape_i(i17) + escape_i(i18) + escape_i(i19) + escape_i(i20) + escape_i(i21) + escape_i(i22) + escape_i(i23) + escape_i(i24) + escape_i(i25) + escape_i(i0) + escape_i(i99) + jump(p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99) + """ + self.optimize_loop(ops, expected) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,4 +1,4 @@ -import py, random +import py, random, string from rpython.rlib.debug import debug_print from rpython.rtyper.lltypesystem import lltype, llmemory, rffi @@ -122,7 +122,14 @@ ('value', lltype.Signed), ('next', lltype.Ptr(NODE3)), hints={'immutable': True})) - + + big_fields = [('big' + i, lltype.Signed) for i in string.ascii_lowercase] + BIG = lltype.GcForwardReference() + BIG.become(lltype.GcStruct('BIG', *big_fields, hints={'immutable': True})) + + for field, _ in big_fields: + locals()[field + 'descr'] = cpu.fielddescrof(BIG, field) + node = lltype.malloc(NODE) node.value = 5 node.next = node From pypy.commits at gmail.com Mon Jan 18 02:50:50 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 17 Jan 2016 23:50:50 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed syntax error Message-ID: <569c995a.a85fc20a.6917f.0384@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81828:12cb71eb84d2 Date: 2016-01-18 08:50 +0100 http://bitbucket.org/pypy/pypy/changeset/12cb71eb84d2/ Log: fixed syntax error diff --git a/rpython/jit/backend/test/zll_stress.py b/rpython/jit/backend/test/zll_stress.py --- a/rpython/jit/backend/test/zll_stress.py +++ b/rpython/jit/backend/test/zll_stress.py @@ -19,4 +19,5 @@ r = Random() r.jumpahead(piece*99999999) for i in range(piece*per_piece, (piece+1)*per_piece): + print " i = %d; r.setstate(%s)" % (i, r.getstate()) check_random_function(cpu, LLtypeOperationBuilder, r, i, total_iterations) diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -24,7 +24,7 @@ type = INT size = WORD self.ressign = True - if calldescr is not None and isinstance(calldescr, CallDescr) + if calldescr is not None and isinstance(calldescr, CallDescr): type = calldescr.get_result_type() size = calldescr.get_result_size() self.ressign = calldescr.is_result_signed() From pypy.commits at gmail.com Mon Jan 18 03:14:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 00:14:09 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: dict comprehension rewrite to loop overwrote parameter variable of the function Message-ID: <569c9ed1.11181c0a.71b03.0165@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81829:8c4d49ec746f Date: 2016-01-18 09:13 +0100 http://bitbucket.org/pypy/pypy/changeset/8c4d49ec746f/ Log: dict comprehension rewrite to loop overwrote parameter variable of the function diff --git a/rpython/jit/backend/test/zll_stress.py b/rpython/jit/backend/test/zll_stress.py --- a/rpython/jit/backend/test/zll_stress.py +++ b/rpython/jit/backend/test/zll_stress.py @@ -19,5 +19,4 @@ r = Random() r.jumpahead(piece*99999999) for i in range(piece*per_piece, (piece+1)*per_piece): - print " i = %d; r.setstate(%s)" % (i, r.getstate()) check_random_function(cpu, LLtypeOperationBuilder, r, i, total_iterations) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -195,6 +195,10 @@ if even.is_even(): # found an even registers that is actually free odd = REGS[even.value+1] + if odd not in r.MANAGED_REGS: + # makes no sense to use this register! + i -= 1 + continue if odd not in self.free_regs: # sadly odd is not free, but for spilling # we found a candidate @@ -215,7 +219,11 @@ # a candidate? odd = even even = REGS[odd.value-1] - if even in r.MANAGED_REGS and even not in self.free_regs: + if even not in r.MANAGED_REGS: + # makes no sense to use this register! + i -= 1 + continue + if even not in self.free_regs: # yes even might be a candidate # this means that odd is free, but not even candidates[even] = True @@ -267,8 +275,8 @@ # require one spill, thus we need to spill two! # this is a rare case! reverse_mapping = {} - for var, reg in self.reg_bindings.items(): - reverse_mapping[reg] = var + for v, reg in self.reg_bindings.items(): + reverse_mapping[reg] = v # always take the first for i, reg in enumerate(r.MANAGED_REGS): if i % 2 == 1: From pypy.commits at gmail.com Mon Jan 18 04:09:10 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 01:09:10 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: reverted rlib/constant.py Message-ID: <569cabb6.ccaa1c0a.cdb63.130b@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81831:c2e76c7a5755 Date: 2016-01-18 10:08 +0100 http://bitbucket.org/pypy/pypy/changeset/c2e76c7a5755/ Log: reverted rlib/constant.py diff --git a/rpython/rlib/constant.py b/rpython/rlib/constant.py --- a/rpython/rlib/constant.py +++ b/rpython/rlib/constant.py @@ -11,6 +11,7 @@ DBL_MIN = rffi_platform.DefinedConstantDouble('DBL_MIN') DBL_MANT_DIG = rffi_platform.ConstantInteger('DBL_MANT_DIG') + for k, v in rffi_platform.configure(CConfig).items(): assert v is not None, "no value found for %r" % k globals()[k] = v From pypy.commits at gmail.com Mon Jan 18 04:09:08 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 01:09:08 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed double stack frame allocation from stacklet, zpickle requiring them pass Message-ID: <569cabb4.84e31c0a.70bdc.11cb@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81830:737d6e63553a Date: 2016-01-18 10:07 +0100 http://bitbucket.org/pypy/pypy/changeset/737d6e63553a/ Log: removed double stack frame allocation from stacklet, zpickle requiring them pass diff --git a/rpython/rlib/constant.py b/rpython/rlib/constant.py --- a/rpython/rlib/constant.py +++ b/rpython/rlib/constant.py @@ -11,7 +11,6 @@ DBL_MIN = rffi_platform.DefinedConstantDouble('DBL_MIN') DBL_MANT_DIG = rffi_platform.ConstantInteger('DBL_MANT_DIG') - for k, v in rffi_platform.configure(CConfig).items(): assert v is not None, "no value found for %r" % k globals()[k] = v diff --git a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h --- a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h @@ -7,16 +7,8 @@ { void *result; __asm__ volatile ( - /* By Vaibhav Sood & Armin Rigo, with some copying from - the Stackless version by Kristjan Valur Jonsson */ - - /* Save all 18 volatile GP registers, 18 volatile FP regs, and 12 - volatile vector regs. We need a stack frame of 144 bytes for FPR, - 144 bytes for GPR, 192 bytes for VR plus 48 bytes for the standard - stackframe = 528 bytes (a multiple of 16). */ - - //"mflr 0\n" /* Save LR into 16(r1) */ - //"stg 0, 16(1)\n" + /* The Stackless version by Kristjan Valur Jonsson, + ported to s390x by Richard Plangger */ "stmg 6,15,48(15)\n" @@ -25,17 +17,15 @@ "std 4,144(15)\n" "std 6,152(15)\n" - "lay 15,-160(15)\n" /* Create stack frame */ - "lgr 10, %[restore_state]\n" /* save 'restore_state' for later */ "lgr 11, %[extra]\n" /* save 'extra' for later */ "lgr 14, %[save_state]\n" /* move 'save_state' into r14 for branching */ "lgr 2, 15\n" /* arg 1: current (old) stack pointer */ "lgr 3, 11\n" /* arg 2: extra */ - "lay 15, -160(15)\n" /* create temp stack space (see below) */ + "lay 15,-160(15)\n" /* create stack frame */ "basr 14, 14\n" /* call save_state() */ - "lay 15, 160(15)\n" /* destroy temp stack space */ + "lay 15, 160(15)\n" /* destroy stack frame */ "cgij 2, 0, 8, zero\n" /* skip the rest if the return value is null */ From pypy.commits at gmail.com Mon Jan 18 05:09:11 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Jan 2016 02:09:11 -0800 (PST) Subject: [pypy-commit] pypy default: test and fix for the sandboxing issue on 32-bit (test_lseek) Message-ID: <569cb9c7.ccaa1c0a.cdb63.2a05@mx.google.com> Author: Armin Rigo Branch: Changeset: r81833:4fd739667032 Date: 2016-01-18 11:08 +0100 http://bitbucket.org/pypy/pypy/changeset/4fd739667032/ Log: test and fix for the sandboxing issue on 32-bit (test_lseek) diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -90,6 +90,8 @@ dumper._annenforceargs_ = [s_list_of_chars, s_obj] def add_loader(s_obj, loader): + # 's_obj' should be the **least general annotation** that we're + # interested in, somehow loaders.append((s_obj, loader)) def get_dumper_annotation(dumper): @@ -187,6 +189,14 @@ r_32bits_mask = r_longlong(0xFFFFFFFF) +def load_longlong_nonneg(loader): + x = load_longlong(loader) + if x < 0: + raise ValueError("expected a non-negative longlong") + return x +add_loader(annmodel.SomeInteger(knowntype=r_longlong, nonneg=True), + load_longlong_nonneg) + def load_longlong(loader): if readchr(loader) != TYPE_INT64: raise ValueError("expected a longlong") diff --git a/rpython/rlib/test/test_rmarshal.py b/rpython/rlib/test/test_rmarshal.py --- a/rpython/rlib/test/test_rmarshal.py +++ b/rpython/rlib/test/test_rmarshal.py @@ -190,3 +190,13 @@ assert sttuple[4] == st[4] assert sttuple[5] == st[5] assert len(sttuple) == 10 + +def test_longlong(): + # get_loader for (r_longolong, nonneg=True) used to return + # load_int_nonneg on 32-bit, instead of load_longlong. + for nonneg in [True, False]: + s_longlong = annmodel.SomeInteger(knowntype=r_longlong, nonneg=nonneg) + load = get_loader(s_longlong) + loader = Loader("I\x01\x23\x45\x67\x89\xab\xcd\x0e") + res = load(loader) + assert res == 0x0ecdab8967452301 From pypy.commits at gmail.com Mon Jan 18 05:09:08 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 18 Jan 2016 02:09:08 -0800 (PST) Subject: [pypy-commit] pypy default: Missing headers Message-ID: <569cb9c4.9a6f1c0a.aa828.24b9@mx.google.com> Author: Armin Rigo Branch: Changeset: r81832:72077eb61f4c Date: 2016-01-18 10:42 +0100 http://bitbucket.org/pypy/pypy/changeset/72077eb61f4c/ Log: Missing headers diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -13,6 +13,7 @@ # Sandboxing code generator for external functions # +from rpython.rlib import rposix from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.llannotation import lltype_to_annotation from rpython.tool.sourcetools import func_with_new_name @@ -25,12 +26,12 @@ # a version of os.read() and os.write() that are not mangled # by the sandboxing mechanism -ll_read_not_sandboxed = rffi.llexternal('read', +ll_read_not_sandboxed = rposix.external('read', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T, sandboxsafe=True) -ll_write_not_sandboxed = rffi.llexternal('write', +ll_write_not_sandboxed = rposix.external('write', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T, sandboxsafe=True) From pypy.commits at gmail.com Mon Jan 18 07:52:43 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 04:52:43 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: replaced load 32bit imm with load imm that can handle 64 bit too Message-ID: <569ce01b.624fc20a.9db1e.6ff8@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81834:045f28548d0a Date: 2016-01-18 13:51 +0100 http://bitbucket.org/pypy/pypy/changeset/045f28548d0a/ Log: replaced load 32bit imm with load imm that can handle 64 bit too diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1227,8 +1227,7 @@ gcmap = lltype.nullptr(jitframe.GCMAP) self.load_gcmap(self.mc, r.r2, gcmap) - assert fail_descr_loc.getint() <= 2**32-1 - self.mc.LGFI(r.r3, fail_descr_loc) + self.mc.load_imm(r.r3, fail_descr_loc.getint()) self.mc.STG(r.r3, l.addr(ofs, r.SPP)) self.mc.STG(r.r2, l.addr(ofs2, r.SPP)) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -694,16 +694,19 @@ def _cmp_guard_class(self, op, locs, regalloc): offset = self.cpu.vtable_offset + loc_ptr = locs[0] + loc_classptr = locs[1] if offset is not None: # could be one instruction shorter, but don't care because # it's not this case that is commonly translated - self.mc.LG(r.SCRATCH, l.addr(offset, locs[0])) + self.mc.LG(r.SCRATCH, l.addr(offset, loc_ptr)) self.mc.load_imm(r.SCRATCH2, locs[1].value) self.mc.cmp_op(r.SCRATCH, r.SCRATCH2) else: + classptr = loc_classptr.value expected_typeid = (self.cpu.gc_ll_descr - .get_typeid_from_classptr_if_gcremovetypeptr(locs[1].value)) - self._cmp_guard_gc_type(locs[0], expected_typeid) + .get_typeid_from_classptr_if_gcremovetypeptr(classptr)) + self._cmp_guard_gc_type(loc_ptr, expected_typeid) def _read_typeid(self, targetreg, loc_ptr): # Note that the typeid half-word is at offset 0 on a little-endian @@ -753,10 +756,10 @@ offset2 = self.cpu.subclassrange_min_offset if offset is not None: # read this field to get the vtable pointer - self.mc.LG(r.SCRATCH2, l.addr(offset, loc_object)) + self.mc.LG(r.SCRATCH, l.addr(offset, loc_object)) # read the vtable's subclassrange_min field assert check_imm_value(offset2) - self.mc.load(r.SCRATCH2, r.SCRATCH2, offset2) + self.mc.load(r.SCRATCH2, r.SCRATCH, offset2) else: # read the typeid self._read_typeid(r.SCRATCH, loc_object) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1081,6 +1081,11 @@ arglocs = self._prepare_guard(op, [loc, resloc]) return arglocs + def prepare_guard_is_object(self, op): + loc_object = self.ensure_reg(op.getarg(0), force_in_reg=True) + arglocs = self._prepare_guard(op, [loc_object]) + return arglocs + def prepare_save_exception(self, op): res = self.rm.force_allocate_reg(op) return [res] @@ -1191,6 +1196,7 @@ # we know it does not move, but well rgc._make_sure_does_not_move(fail_descr) fail_descr = rffi.cast(lltype.Signed, fail_descr) + assert fail_descr > 0 if op.numargs() > 0: loc = self.ensure_reg(op.getarg(0)) locs = [loc, imm(fail_descr)] From pypy.commits at gmail.com Mon Jan 18 09:23:25 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 06:23:25 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: modified 2 tests, first used void pointer instead of int pointer (thus qsort did not sort), Message-ID: <569cf55d.ea5ec20a.4bd55.ffff9021@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81835:58481ef6fdd3 Date: 2016-01-18 15:22 +0100 http://bitbucket.org/pypy/pypy/changeset/58481ef6fdd3/ Log: modified 2 tests, first used void pointer instead of int pointer (thus qsort did not sort), the second writes to a union but on a 64 bit big endian machine this byte ends up in the MSB of the result instead of the LSB diff --git a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- a/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py @@ -945,6 +945,11 @@ a[4] = rffi.r_int(4) def compare(a, b): + # do not use a,b directly! on a big endian machine + # ((void*)ptr)[0] will return 0x0 if the 32 bit value + # ptr points to is 0x1 + a = rffi.cast(rffi.INTP, a) + b = rffi.cast(rffi.INTP, b) if a[0] > b[0]: return rffi.r_int(1) else: diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -236,7 +236,9 @@ fn = self.getcompiled(llf, [int]) res = fn(0x33) - assert res in [0x10203033, 0x33203040] + assert res in [0x10203033, 0x33203040, + # big endian 64 bit machine + 0x3300000010203040] def test_sizeof_void_array(self): from rpython.rtyper.lltypesystem import llmemory From pypy.commits at gmail.com Mon Jan 18 10:00:19 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 07:00:19 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: reverted debug lines Message-ID: <569cfe03.8a58c20a.921f0.ffffa975@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81836:4a1e1cbe4d29 Date: 2016-01-18 15:48 +0100 http://bitbucket.org/pypy/pypy/changeset/4a1e1cbe4d29/ Log: reverted debug lines From pypy.commits at gmail.com Mon Jan 18 10:09:49 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 07:09:49 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: another case where regalloc pair did not succeed Message-ID: <569d003d.85b01c0a.167c1.ffff9c1d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81837:477fb942bb79 Date: 2016-01-18 16:09 +0100 http://bitbucket.org/pypy/pypy/changeset/477fb942bb79/ Log: another case where regalloc pair did not succeed diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -284,7 +284,9 @@ if i+1 < len(r.MANAGED_REGS): reg2 = r.MANAGED_REGS[i+1] assert reg.is_even() and reg2.is_odd() - ovar = reverse_mapping[reg] + ovar = reverse_mapping.get(reg,None) + if ovar is None: + continue if ovar in forbidden_vars: continue ovar2 = reverse_mapping.get(reg2, None) From pypy.commits at gmail.com Mon Jan 18 12:58:02 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 18 Jan 2016 09:58:02 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Create sandbox stubs in the annotator and finally get rid of databasing-time creation of sandbox helpers Message-ID: <569d27aa.c2351c0a.2fdde.ffffd5fe@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81838:2906290a8ebc Date: 2016-01-18 17:57 +0000 http://bitbucket.org/pypy/pypy/changeset/2906290a8ebc/ Log: Create sandbox stubs in the annotator and finally get rid of databasing-time creation of sandbox helpers diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -358,6 +358,10 @@ return self.descs[obj_key] except KeyError: if isinstance(pyobj, types.FunctionType): + if self.annotator.translator.config.translation.sandbox: + if hasattr(pyobj, '_ptr') and not getattr(pyobj._ptr._obj, '_safe_not_sandboxed', True): + from rpython.translator.sandbox.rsandbox import get_sandbox_stub + pyobj = get_sandbox_stub(pyobj._ptr._obj) result = description.FunctionDesc(self, pyobj) elif isinstance(pyobj, (type, types.ClassType)): if pyobj is object: diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -903,26 +903,12 @@ funcgen.implementation_end() def new_funcnode(db, T, obj, forcename=None): - sandbox = db.sandbox and need_sandboxing(obj) - if sandbox: - if getattr(obj, 'external', None) is not None: - obj.__dict__['graph'] = rsandbox.get_sandbox_stub( - obj, db.translator.rtyper) - obj.__dict__.pop('_safe_not_sandboxed', None) - obj.__dict__.pop('external', None) - if forcename: name = forcename else: name = _select_name(db, obj) return FuncNode(db, T, obj, name) -def need_sandboxing(fnobj): - if hasattr(fnobj, '_safe_not_sandboxed'): - return not fnobj._safe_not_sandboxed - else: - return "if_external" - def select_function_code_generators(fnobj, db, functionname): if hasattr(fnobj, 'graph'): exception_policy = getattr(fnobj, 'exception_policy', None) diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -117,13 +117,11 @@ dump_string = rmarshal.get_marshaller(str) load_int = rmarshal.get_loader(int) -def get_sandbox_stub(fnobj, rtyper): - """Build always-raising graph for unsupported external function.""" +def get_sandbox_stub(fnobj): + """Build always-raising stub function for unsupported external function.""" fnname = fnobj._name - args_s, s_result = sig_ll(fnobj) msg = "Not implemented: sandboxing for external function '%s'" % (fnname,) - execute = make_stub(fnname, msg) - return _annotate(rtyper, execute, args_s, s_result) + return make_stub(fnname, msg) def get_external_function_sandbox_graph(fnobj, rtyper): """Build the graph of a helper trampoline function to be used diff --git a/rpython/translator/sandbox/test/test_sandbox.py b/rpython/translator/sandbox/test/test_sandbox.py --- a/rpython/translator/sandbox/test/test_sandbox.py +++ b/rpython/translator/sandbox/test/test_sandbox.py @@ -8,6 +8,7 @@ from rpython.translator.interactive import Translation from rpython.translator.sandbox.sandlib import read_message, write_message from rpython.translator.sandbox.sandlib import write_exception +from rpython.translator.tool.cbuild import ExternalCompilationInfo if hasattr(signal, 'alarm'): _orig_read_message = read_message @@ -292,6 +293,16 @@ rescode = pipe.wait() assert rescode == 0 +def test_llexternal(): + c_foo = rffi.llexternal('foo', [], rffi.INT) + def f(argv): + try: + c_foo() + except: + pass + return 0 + compile(f) # Check that this doesn't crash + class TestPrintedResults: def run(self, entry_point, args, expected): From pypy.commits at gmail.com Mon Jan 18 14:29:17 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 18 Jan 2016 11:29:17 -0800 (PST) Subject: [pypy-commit] pypy default: Merged in vincentlegoll/pypy/fix-cpython-ssl-tests-2.7 (pull request #389) Message-ID: <569d3d0d.4c301c0a.1e7ab.fffffb7e@mx.google.com> Author: Maciej Fijalkowski Branch: Changeset: r81842:ac2ca4598b64 Date: 2016-01-18 20:28 +0100 http://bitbucket.org/pypy/pypy/changeset/ac2ca4598b64/ Log: Merged in vincentlegoll/pypy/fix-cpython-ssl-tests-2.7 (pull request #389) Fix SSL tests diff --git a/lib-python/2.7/test/capath/0e4015b9.0 b/lib-python/2.7/test/capath/0e4015b9.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/0e4015b9.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/capath/ce7b8643.0 b/lib-python/2.7/test/capath/ce7b8643.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/ce7b8643.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/https_svn_python_org_root.pem b/lib-python/2.7/test/https_svn_python_org_root.pem deleted file mode 100644 --- a/lib-python/2.7/test/https_svn_python_org_root.pem +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO -BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T -SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD ------END CERTIFICATE----- diff --git a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem --- a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem +++ b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG @@ -8,9 +8,9 @@ aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97 -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9 +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -57,7 +57,8 @@ SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") +REMOTE_HOST = "self-signed.pythontest.net" +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") @@ -244,7 +245,7 @@ self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): - with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) @@ -792,7 +793,7 @@ # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"): - ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) + ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) @@ -1013,7 +1014,7 @@ ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) @@ -1023,8 +1024,8 @@ # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) - # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + # but CAFILE_CACERT is a CA cert + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), @@ -1040,7 +1041,7 @@ (('emailAddress', 'support at cacert.org'),)), 'version': 3}]) - with open(SVN_PYTHON_ORG_ROOT_CERT) as f: + with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) @@ -1215,11 +1216,11 @@ class NetworkedTests(unittest.TestCase): def test_connect(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() @@ -1228,27 +1229,27 @@ s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) + self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443))) self.assertTrue(s.getpeercert()) finally: s.close() @@ -1256,14 +1257,14 @@ def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish @@ -1285,58 +1286,62 @@ def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: - self.skipTest("svn.python.org responded too quickly") + self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - rc = s.connect_ex(("svn.python.org", 444)) + rc = s.connect_ex((REMOTE_HOST, 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. - self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) + errors = ( + errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, + errno.EWOULDBLOCK, + ) + self.assertIn(rc, errors) finally: s.close() def test_connect_with_context(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), - server_hostname="svn.python.org") - s.connect(("svn.python.org", 443)) + server_hostname=REMOTE_HOST) + s.connect((REMOTE_HOST, 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # This should succeed because we specify the root cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(REMOTE_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1349,12 +1354,12 @@ # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1365,7 +1370,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1373,15 +1378,15 @@ s.close() def test_connect_cadata(self): - with open(CAFILE_CACERT) as f: + with open(REMOTE_ROOT_CERT) as f: pem = f.read().decode('ascii') der = ssl.PEM_cert_to_DER_cert(pem) - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1390,7 +1395,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1399,9 +1404,9 @@ # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) fd = ss.fileno() f = ss.makefile() f.close() @@ -1415,9 +1420,9 @@ self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = socket.socket(socket.AF_INET) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, @@ -1460,12 +1465,12 @@ if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) - _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) + _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): - remote = ("svn.python.org", 443) + remote = (REMOTE_HOST, 443) with support.transient_internet(remote[0]): with closing(ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s: @@ -1510,13 +1515,13 @@ def test_get_ca_certs_capath(self): # capath certs are loaded on request - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1527,12 +1532,12 @@ @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with closing(ctx1.wrap_socket(s)) as ss: - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 @@ -3026,7 +3031,7 @@ pass for filename in [ - CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, + CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -119,3 +119,7 @@ Updated to CFFI 1.5, which supports a new way to do embedding. Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch From pypy.commits at gmail.com Mon Jan 18 14:29:26 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Mon, 18 Jan 2016 11:29:26 -0800 (PST) Subject: [pypy-commit] pypy fix-cpython-ssl-tests-2.7: New branch to fix SSL tests by porting the cpython patch Message-ID: <569d3d16.46bb1c0a.7c11b.fffffe28@mx.google.com> Author: Vincent Legoll Branch: fix-cpython-ssl-tests-2.7 Changeset: r81839:a989150f7e92 Date: 2016-01-18 18:54 +0100 http://bitbucket.org/pypy/pypy/changeset/a989150f7e92/ Log: New branch to fix SSL tests by porting the cpython patch From pypy.commits at gmail.com Mon Jan 18 14:29:30 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Mon, 18 Jan 2016 11:29:30 -0800 (PST) Subject: [pypy-commit] pypy fix-cpython-ssl-tests-2.7: Add SSL fixing branch description Message-ID: <569d3d1a.cb571c0a.a54d.ffffffbf@mx.google.com> Author: Vincent Legoll Branch: fix-cpython-ssl-tests-2.7 Changeset: r81841:7cb4ec9641f8 Date: 2016-01-18 19:05 +0100 http://bitbucket.org/pypy/pypy/changeset/7cb4ec9641f8/ Log: Add SSL fixing branch description diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -119,3 +119,7 @@ Updated to CFFI 1.5, which supports a new way to do embedding. Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch From pypy.commits at gmail.com Mon Jan 18 14:29:28 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Mon, 18 Jan 2016 11:29:28 -0800 (PST) Subject: [pypy-commit] pypy fix-cpython-ssl-tests-2.7: Fix cpython test suite's SSL tests Message-ID: <569d3d18.c717c20a.f2f98.03fc@mx.google.com> Author: Vincent Legoll Branch: fix-cpython-ssl-tests-2.7 Changeset: r81840:5a76a7be8569 Date: 2016-01-18 19:00 +0100 http://bitbucket.org/pypy/pypy/changeset/5a76a7be8569/ Log: Fix cpython test suite's SSL tests They were failing since the expiration of certificate for svn.python.org at Thu 24 Dec 2015. See details at cpython's Issue #25940. [1] This patch is an import of cpython's patch fixing the issue, see [2]. The only modifications are in the paths that are different in pypy's repository and the missing Misc/NEWS file for which the hunk has been dropped. [1] http://bugs.python.org/issue25940 [2] https://hg.python.org/cpython/rev/fb7131939508 diff --git a/lib-python/2.7/test/capath/0e4015b9.0 b/lib-python/2.7/test/capath/0e4015b9.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/0e4015b9.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/capath/ce7b8643.0 b/lib-python/2.7/test/capath/ce7b8643.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/ce7b8643.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/https_svn_python_org_root.pem b/lib-python/2.7/test/https_svn_python_org_root.pem deleted file mode 100644 --- a/lib-python/2.7/test/https_svn_python_org_root.pem +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO -BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T -SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD ------END CERTIFICATE----- diff --git a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem --- a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem +++ b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG @@ -8,9 +8,9 @@ aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97 -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9 +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -57,7 +57,8 @@ SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") +REMOTE_HOST = "self-signed.pythontest.net" +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") @@ -244,7 +245,7 @@ self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): - with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) @@ -792,7 +793,7 @@ # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"): - ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) + ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) @@ -1013,7 +1014,7 @@ ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) @@ -1023,8 +1024,8 @@ # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) - # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + # but CAFILE_CACERT is a CA cert + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), @@ -1040,7 +1041,7 @@ (('emailAddress', 'support at cacert.org'),)), 'version': 3}]) - with open(SVN_PYTHON_ORG_ROOT_CERT) as f: + with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) @@ -1215,11 +1216,11 @@ class NetworkedTests(unittest.TestCase): def test_connect(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() @@ -1228,27 +1229,27 @@ s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) + self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443))) self.assertTrue(s.getpeercert()) finally: s.close() @@ -1256,14 +1257,14 @@ def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish @@ -1285,58 +1286,62 @@ def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: - self.skipTest("svn.python.org responded too quickly") + self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - rc = s.connect_ex(("svn.python.org", 444)) + rc = s.connect_ex((REMOTE_HOST, 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. - self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) + errors = ( + errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, + errno.EWOULDBLOCK, + ) + self.assertIn(rc, errors) finally: s.close() def test_connect_with_context(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), - server_hostname="svn.python.org") - s.connect(("svn.python.org", 443)) + server_hostname=REMOTE_HOST) + s.connect((REMOTE_HOST, 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # This should succeed because we specify the root cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(REMOTE_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1349,12 +1354,12 @@ # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1365,7 +1370,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1373,15 +1378,15 @@ s.close() def test_connect_cadata(self): - with open(CAFILE_CACERT) as f: + with open(REMOTE_ROOT_CERT) as f: pem = f.read().decode('ascii') der = ssl.PEM_cert_to_DER_cert(pem) - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1390,7 +1395,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1399,9 +1404,9 @@ # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) fd = ss.fileno() f = ss.makefile() f.close() @@ -1415,9 +1420,9 @@ self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = socket.socket(socket.AF_INET) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, @@ -1460,12 +1465,12 @@ if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) - _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) + _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): - remote = ("svn.python.org", 443) + remote = (REMOTE_HOST, 443) with support.transient_internet(remote[0]): with closing(ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s: @@ -1510,13 +1515,13 @@ def test_get_ca_certs_capath(self): # capath certs are loaded on request - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1527,12 +1532,12 @@ @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with closing(ctx1.wrap_socket(s)) as ss: - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 @@ -3026,7 +3031,7 @@ pass for filename in [ - CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, + CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: From pypy.commits at gmail.com Mon Jan 18 15:53:24 2016 From: pypy.commits at gmail.com (sbauman) Date: Mon, 18 Jan 2016 12:53:24 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Forgot to change this back earlier Message-ID: <569d50c4.84c9c20a.76280.1a11@mx.google.com> Author: Spenser Bauman Branch: remove-getfield-pure Changeset: r81843:de689eafaf4b Date: 2016-01-18 15:52 -0500 http://bitbucket.org/pypy/pypy/changeset/de689eafaf4b/ Log: Forgot to change this back earlier diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -66,7 +66,7 @@ class OptPure(Optimization): def __init__(self): self.postponed_op = None - self._pure_operations = [None] * (rop._NOSIDEEFFECT_LAST - + self._pure_operations = [None] * (rop._ALWAYS_PURE_LAST - rop._ALWAYS_PURE_FIRST) self.call_pure_positions = [] self.extra_call_pure = [] From pypy.commits at gmail.com Tue Jan 19 02:42:22 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 23:42:22 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added nursery_ptr_increment, added lldebug to some tests to debug in gdb Message-ID: <569de8de.88d31c0a.3ad3.ffff9beb@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81844:c0cdd96eb4d2 Date: 2016-01-19 08:41 +0100 http://bitbucket.org/pypy/pypy/changeset/c0cdd96eb4d2/ Log: added nursery_ptr_increment, added lldebug to some tests to debug in gdb diff --git a/rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py b/rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py --- a/rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py +++ b/rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py @@ -26,6 +26,7 @@ t = TranslationContext() t.config.translation.gc = "minimark" t.config.translation.gcremovetypeptr = gcremovetypeptr + t.config.translation.lldebug = True ann = t.buildannotator() ann.build_types(main, [s_list_of_strings], main_entry_point=True) rtyper = t.buildrtyper() diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -84,7 +84,7 @@ # t = TranslationContext() t.config.translation.gc = gc - # t.config.translation.lldebug = True # pretty useful when debugging assembly + t.config.translation.lldebug = True # pretty useful when debugging assembly if gc != 'boehm': t.config.translation.gcremovetypeptr = True for name, value in kwds.items(): diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -32,6 +32,8 @@ emit_int_add = gen_emit_imm_pool_rr('AGFI','AG','AGR') emit_int_add_ovf = emit_int_add + emit_nursery_ptr_increment = emit_int_add + def emit_int_sub(self, op, arglocs, regalloc): l0, l1 = arglocs if l1.is_imm() and not l1.is_in_pool(): @@ -1030,7 +1032,6 @@ def emit_zero_array(self, op, arglocs, regalloc): base_loc, startindex_loc, length_loc, \ ofs_loc, itemsize_loc, pad_byte_loc = arglocs - print(op, arglocs) if ofs_loc.is_imm(): assert check_imm_value(ofs_loc.value) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -669,6 +669,7 @@ prepare_int_floordiv = helper.prepare_int_div prepare_uint_floordiv = helper.prepare_int_div prepare_int_mod = helper.prepare_int_mod + prepare_nursery_ptr_increment = prepare_int_add prepare_int_and = helper.prepare_int_logic prepare_int_or = helper.prepare_int_logic From pypy.commits at gmail.com Tue Jan 19 02:45:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 18 Jan 2016 23:45:07 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged default Message-ID: <569de983.4e8e1c0a.ec563.ffff92ae@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81845:14c96c77a0fd Date: 2016-01-19 08:44 +0100 http://bitbucket.org/pypy/pypy/changeset/14c96c77a0fd/ Log: merged default diff too long, truncating to 2000 out of 3821 lines diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -685,13 +685,17 @@ # the previous version of this code did. This should work for # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - so_ext = _get_c_extension_suffix() + # ".pypy-VERSION.so" instead. Note a further tweak for cffi's + # embedding mode: if EXT_SUFFIX is also defined, use that + # directly. + so_ext = get_config_var('EXT_SUFFIX') if so_ext is None: - so_ext = get_config_var('SO') # fall-back - # extensions in debug_mode are named 'module_d.pyd' under windows - if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back + # extensions in debug_mode are named 'module_d.pyd' under windows + if os.name == 'nt' and self.debug: + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/test/capath/0e4015b9.0 b/lib-python/2.7/test/capath/0e4015b9.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/0e4015b9.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/capath/ce7b8643.0 b/lib-python/2.7/test/capath/ce7b8643.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/ce7b8643.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/https_svn_python_org_root.pem b/lib-python/2.7/test/https_svn_python_org_root.pem deleted file mode 100644 --- a/lib-python/2.7/test/https_svn_python_org_root.pem +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO -BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T -SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD ------END CERTIFICATE----- diff --git a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem --- a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem +++ b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG @@ -8,9 +8,9 @@ aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97 -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9 +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -57,7 +57,8 @@ SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") +REMOTE_HOST = "self-signed.pythontest.net" +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") @@ -244,7 +245,7 @@ self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): - with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) @@ -792,7 +793,7 @@ # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"): - ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) + ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) @@ -1013,7 +1014,7 @@ ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) @@ -1023,8 +1024,8 @@ # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) - # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + # but CAFILE_CACERT is a CA cert + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), @@ -1040,7 +1041,7 @@ (('emailAddress', 'support at cacert.org'),)), 'version': 3}]) - with open(SVN_PYTHON_ORG_ROOT_CERT) as f: + with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) @@ -1215,11 +1216,11 @@ class NetworkedTests(unittest.TestCase): def test_connect(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() @@ -1228,27 +1229,27 @@ s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) + self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443))) self.assertTrue(s.getpeercert()) finally: s.close() @@ -1256,14 +1257,14 @@ def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish @@ -1285,58 +1286,62 @@ def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: - self.skipTest("svn.python.org responded too quickly") + self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - rc = s.connect_ex(("svn.python.org", 444)) + rc = s.connect_ex((REMOTE_HOST, 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. - self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) + errors = ( + errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, + errno.EWOULDBLOCK, + ) + self.assertIn(rc, errors) finally: s.close() def test_connect_with_context(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), - server_hostname="svn.python.org") - s.connect(("svn.python.org", 443)) + server_hostname=REMOTE_HOST) + s.connect((REMOTE_HOST, 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # This should succeed because we specify the root cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(REMOTE_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1349,12 +1354,12 @@ # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1365,7 +1370,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1373,15 +1378,15 @@ s.close() def test_connect_cadata(self): - with open(CAFILE_CACERT) as f: + with open(REMOTE_ROOT_CERT) as f: pem = f.read().decode('ascii') der = ssl.PEM_cert_to_DER_cert(pem) - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1390,7 +1395,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1399,9 +1404,9 @@ # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) fd = ss.fileno() f = ss.makefile() f.close() @@ -1415,9 +1420,9 @@ self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = socket.socket(socket.AF_INET) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, @@ -1460,12 +1465,12 @@ if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) - _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) + _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): - remote = ("svn.python.org", 443) + remote = (REMOTE_HOST, 443) with support.transient_internet(remote[0]): with closing(ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s: @@ -1510,13 +1515,13 @@ def test_get_ca_certs_capath(self): # capath certs are loaded on request - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1527,12 +1532,12 @@ @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with closing(ctx1.wrap_socket(s)) as ss: - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 @@ -3026,7 +3031,7 @@ pass for filename in [ - CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, + CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.2 +Version: 1.5.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.5.0" +__version_info__ = (1, 5, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,8 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 #define _cffi_call_python \ - ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -206,7 +207,8 @@ /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org #endif diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -74,6 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -101,13 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed) + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -533,6 +542,31 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + if '__pypy__' in sys.builtin_module_names: + if hasattr(sys, 'prefix'): + import os + libdir = os.path.join(sys.prefix, 'bin') + dirs = kwds.setdefault('library_dirs', []) + if libdir not in dirs: + dirs.append(libdir) + pythonlib = "pypy-c" + else: + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + libraries = kwds.setdefault('libraries', []) + if pythonlib not in libraries: + libraries.append(pythonlib) + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " @@ -592,14 +626,23 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0): + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, + target=target, source_extension=source_extension, compiler_verbose=verbose, **kwds) def init_once(self, func, tag): @@ -626,6 +669,32 @@ self._init_once_cache[tag] = (True, result) return result + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -220,8 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._override = False - self._packed = False + self._options = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -281,16 +280,15 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): - prev_override = self._override - prev_packed = self._packed + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options try: - self._override = override - self._packed = packed + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} self._internal_parse(csource) finally: - self._override = prev_override - self._packed = prev_packed + self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -376,10 +374,13 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._inside_extern_python: - self._declare('extern_python ' + decl.name, tp) + if self._options['dllexport']: + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' else: - self._declare('function ' + decl.name, tp) + tag = 'function ' + self._declare(tag + decl.name, tp) def _parse_decl(self, decl): node = decl.type @@ -449,7 +450,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._override: + if not self._options['override']: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -728,7 +729,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._packed + tp.packed = self._options['packed'] if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, + target_extension, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +38,32 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _save_val(name): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + return config_vars.get(name, Ellipsis) + +def _restore_val(name, value): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + config_vars[name] = value + if value is Ellipsis: + del config_vars[name] + +def _win32_hack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): + MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ + MSVCCompiler._remove_visual_c_ref + MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file + +def _win32_unhack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + MSVCCompiler._remove_visual_c_ref = \ + MSVCCompiler._remove_visual_c_ref_CFFI_BAK + +def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -49,18 +76,29 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: + if sys.platform == 'win32' and embedding: + _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 + old_SO = _save_val('SO') + old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extension is not None: + _restore_val('SO', target_extension) + _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) + _restore_val('SO', old_SO) + _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) + if sys.platform == 'win32' and embedding: + _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -281,6 +282,29 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % + (self._string_literal(self.ffi._embedding),)) + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION + # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() @@ -365,17 +389,16 @@ prnt() # # the init function - base_module_name = self.module_name.split('.')[-1] prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') if self._num_externpy: prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python = ' + prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % VERSION) + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -394,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') @@ -1123,7 +1146,10 @@ assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) - def _generate_cpy_extern_python_decl(self, tp, name): + def _generate_cpy_dllexport_python_collecttype(self, tp, name): + self._generate_cpy_extern_python_collecttype(tp, name) + + def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1156,7 +1182,11 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + if dllexport: + tag = 'CFFI_DLLEXPORT' + else: + tag = 'static' + prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1174,6 +1204,9 @@ prnt() self._num_externpy += 1 + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: raise ffiplatform.VerificationError( @@ -1185,6 +1218,21 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + def _generate_cpy_dllexport_python_ctx(self, tp, name): + self._generate_cpy_extern_python_ctx(tp, name) + + def _string_literal(self, s): + def _char_repr(c): + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' + if c in '\\"?': return '\\' + c + if ' ' <= c < '\x7F': return c + if c == '\n': return '\\n' + return '\\%03o' % ord(c) + lines = [] + for line in s.splitlines(True): + lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) + return ' \\\n'.join(lines) + # ---------- # emitting the opcodes for individual types @@ -1311,12 +1359,15 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, **kwds): + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) @@ -1325,13 +1376,40 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + if target == '*': + target_module_name = module_name + target_extension = None # use default + else: + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + # split along the first '.' (not the last one, otherwise the + # preceeding dots are interpreted as splitting package names) + index = target.find('.') + if index < 0: + raise ValueError("target argument %r should be a file name " + "containing a '.'" % (target,)) + target_module_name = target[:index] + target_extension = target[index:] + # + ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose, + target_extension, + embedding=embedding) finally: os.chdir(cwd) return outputfilename diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -10,6 +10,15 @@ with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in recent versions of PyPy. +.. note:: + + The interface described in this page is kept for backward compatibility. + From PyPy 4.1, it is recommended to use instead CFFI's `native embedding + support,`__ which gives a simpler approach that works on CPython as well + as PyPy. + +.. __: http://cffi.readthedocs.org/en/latest/embedding.html + The resulting shared library exports very few functions, however they are enough to accomplish everything you need, provided you follow a few principles. The API is: diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -19,7 +19,9 @@ * Clone this new repo (i.e. the fork) to your local machine with the command ``hg clone ssh://hg at bitbucket.org/yourname/pypy``. It is a very slow - operation but only ever needs to be done once. If you already cloned + operation but only ever needs to be done once. See also + http://pypy.org/download.html#building-from-source . + If you already cloned ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, then you can reuse the same clone by editing the file ``.hg/hgrc`` in your clone to contain the line ``default = diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -111,6 +111,19 @@ the same result.) This can also be seen as a bug fix: previously, thread-local objects would be reset between two such calls. +.. branch: globals-quasiimmut + +Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch + .. branch: memop-simplify3 Further simplifying the backend operations malloc_cond_varsize and zero_array. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -84,13 +84,6 @@ from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype - w_pathsetter = space.appexec([], """(): - def f(path): - import sys - sys.path[:] = path - return f - """) - @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -109,7 +102,10 @@ " not found in '%s' or in any parent directory" % home1) return rffi.cast(rffi.INT, 1) space.startup() - space.call_function(w_pathsetter, w_path) + space.appexec([w_path], """(path): + import sys + sys.path[:] = path + """) # import site try: space.setattr(space.getbuiltinmodule('sys'), @@ -149,6 +145,9 @@ return os_thread.setup_threads(space) os_thread.bootstrapper.acquire(space, None, None) + # XXX this doesn't really work. Don't use os.fork(), and + # if your embedder program uses fork(), don't use any PyPy + # code in the fork rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -9,8 +9,8 @@ class Code(W_Root): """A code is a compiled version of some source code. Abstract base class.""" - _immutable_ = True hidden_applevel = False + _immutable_fields_ = ['co_name', 'fast_natural_arity', 'hidden_applevel'] # n >= 0 : arity # FLATPYCALL = 0x100 diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -56,11 +56,13 @@ class PyCode(eval.Code): "CPython-style code objects." - _immutable_ = True - _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", - "_args_as_cellvars[*]"] - + _immutable_fields_ = ["_signature", "co_argcount", "co_cellvars[*]", + "co_code", "co_consts_w[*]", "co_filename", + "co_firstlineno", "co_flags", "co_freevars[*]", + "co_lnotab", "co_names_w[*]", "co_nlocals", + "co_stacksize", "co_varnames[*]", + "_args_as_cellvars[*]", "w_globals?"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -84,6 +86,10 @@ self.co_name = name self.co_firstlineno = firstlineno self.co_lnotab = lnotab + # store the first globals object that the code object is run in in + # here. if a frame is run in that globals object, it does not need to + # store it at all + self.w_globals = None self.hidden_applevel = hidden_applevel self.magic = magic self._signature = cpython_code_signature(self) @@ -91,6 +97,14 @@ self._init_ready() self.new_code_hook() + def frame_stores_global(self, w_globals): + if self.w_globals is None: + self.w_globals = w_globals + return False + if self.w_globals is w_globals: + return False + return True + def new_code_hook(self): code_hook = self.space.fromcache(CodeHookCache)._code_hook if code_hook is not None: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -36,6 +36,7 @@ def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno + self.w_globals = pycode.w_globals class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -67,7 +68,6 @@ escaped = False # see mark_as_escaped() debugdata = None - w_globals = None pycode = None # code object executed by that frame locals_cells_stack_w = None # the list of all locals, cells and the valuestack valuestackdepth = 0 # number of items on valuestack @@ -90,8 +90,9 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.space = space - self.w_globals = w_globals self.pycode = code + if code.frame_stores_global(w_globals): + self.getorcreatedebug().w_globals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize @@ -116,6 +117,12 @@ self.debugdata = FrameDebugData(self.pycode) return self.debugdata + def get_w_globals(self): + debugdata = self.getdebug() + if debugdata is not None: + return debugdata.w_globals + return jit.promote(self.pycode).w_globals + def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -201,8 +208,9 @@ if flags & pycode.CO_NEWLOCALS: self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: - assert self.w_globals is not None - self.getorcreatedebug().w_locals = self.w_globals + w_globals = self.get_w_globals() + assert w_globals is not None + self.getorcreatedebug().w_locals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -449,7 +457,7 @@ w_blockstack, w_exc_value, # last_exception w_tb, # - self.w_globals, + self.get_w_globals(), w(self.last_instr), w(self.frame_finished_execution), w(f_lineno), @@ -658,6 +666,11 @@ def fget_getdictscope(self, space): return self.getdictscope() + def fget_w_globals(self, space): + # bit silly, but GetSetProperty passes a space + return self.get_w_globals() + + ### line numbers ### def fget_f_lineno(self, space): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,7 +837,7 @@ w_bases = self.popvalue() w_name = self.popvalue() w_metaclass = find_metaclass(self.space, w_bases, - w_methodsdict, self.w_globals, + w_methodsdict, self.get_w_globals(), self.space.wrap(self.get_builtin())) w_newclass = self.space.call_function(w_metaclass, w_name, w_bases, w_methodsdict) @@ -881,14 +881,14 @@ def STORE_GLOBAL(self, nameindex, next_instr): varname = self.getname_u(nameindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_globals, varname, w_newvalue) + self.space.setitem_str(self.get_w_globals(), varname, w_newvalue) def DELETE_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - self.space.delitem(self.w_globals, w_varname) + self.space.delitem(self.get_w_globals(), w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.getorcreatedebug().w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.get_w_globals(): varname = self.getname_u(nameindex) w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -898,7 +898,7 @@ self.LOAD_GLOBAL(nameindex, next_instr) # fall-back def _load_global(self, varname): - w_value = self.space.finditem_str(self.w_globals, varname) + w_value = self.space.finditem_str(self.get_w_globals(), varname) if w_value is None: # not in the globals, now look in the built-ins w_value = self.get_builtin().getdictvalue(self.space, varname) @@ -1029,7 +1029,7 @@ if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) - w_globals = self.w_globals + w_globals = self.get_w_globals() if w_flag is None: w_obj = space.call_function(w_import, w_modulename, w_globals, w_locals, w_fromlist) @@ -1237,7 +1237,7 @@ w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments) self.pushvalue(self.space.wrap(fn)) @@ -1249,7 +1249,7 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -34,6 +34,7 @@ import sys f = sys._getframe() assert f.f_globals is globals() + raises(TypeError, "f.f_globals = globals()") def test_f_builtins(self): import sys, __builtin__ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -772,7 +772,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), + f_globals = GetSetProperty(PyFrame.fget_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -93,7 +93,7 @@ if space.is_none(w_locals): w_locals = w_globals else: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): diff --git a/pypy/module/__builtin__/interp_inspect.py b/pypy/module/__builtin__/interp_inspect.py --- a/pypy/module/__builtin__/interp_inspect.py +++ b/pypy/module/__builtin__/interp_inspect.py @@ -2,7 +2,7 @@ def globals(space): "Return the dictionary containing the current scope's global variables." ec = space.getexecutioncontext() - return ec.gettopframe_nohidden().w_globals + return ec.gettopframe_nohidden().get_w_globals() def locals(space): """Return a dictionary containing the current scope's local variables. diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1084,7 +1084,7 @@ def is_strdict(space, w_class): from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) + return space.wrap(isinstance(w_d.get_strategy(), BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -93,7 +93,7 @@ Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = w_obj.strategy.__class__.__name__ + name = w_obj.get_strategy().__class__.__name__ elif isinstance(w_obj, W_ListObject): name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,8 +1,9 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi +from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rtyper.lltypesystem import rffi -VERSION = "1.4.2" +VERSION = "1.5.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -65,6 +66,10 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def startup(self, space): + from pypy.module._cffi_backend import embedding + embedding.glob.space = space + def get_dict_rtld_constants(): found = {} @@ -78,3 +83,11 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value + + +# write this entrypoint() here, to make sure it is registered early enough + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') +def pypy_init_embedded_cffi_module(version, init_struct): + from pypy.module._cffi_backend import embedding + return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -2,24 +2,25 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module +from pypy.module import _cffi_backend from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject from pypy.module._cffi_backend.lib_obj import W_LibObject VERSION_MIN = 0x2601 -VERSION_MAX = 0x26FF +VERSION_MAX = 0x27FF VERSION_EXPORT = 0x0A03 -initfunctype = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) +INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) def load_cffi1_module(space, name, path, initptr): # This is called from pypy.module.cpyext.api.load_extension_module() from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python - initfunc = rffi.cast(initfunctype, initptr) + initfunc = rffi.cast(INITFUNCPTR, initptr) with lltype.scoped_alloc(rffi.VOIDPP.TO, 16, zero=True) as p: p[0] = rffi.cast(rffi.VOIDP, VERSION_EXPORT) p[1] = rffi.cast(rffi.VOIDP, get_ll_cffi_call_python()) @@ -27,8 +28,10 @@ version = rffi.cast(lltype.Signed, p[0]) if not (VERSION_MIN <= version <= VERSION_MAX): raise oefmt(space.w_ImportError, - "cffi extension module '%s' has unknown version %s", - name, hex(version)) + "cffi extension module '%s' uses an unknown version tag %s. " + "This module might need a more recent version of PyPy. " + "The current PyPy provides CFFI %s.", + name, hex(version), _cffi_backend.VERSION) src_ctx = rffi.cast(parse_c_type.PCTX, p[1]) ffi = W_FFIObject(space, src_ctx) @@ -38,7 +41,8 @@ w_name = space.wrap(name) module = Module(space, w_name) - module.setdictvalue(space, '__file__', space.wrap(path)) + if path is not None: + module.setdictvalue(space, '__file__', space.wrap(path)) module.setdictvalue(space, 'ffi', space.wrap(ffi)) module.setdictvalue(space, 'lib', space.wrap(lib)) w_modules_dict = space.sys.get('modules') diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cffi_backend/embedding.py @@ -0,0 +1,146 @@ +import os +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import OperationError, oefmt + +# ____________________________________________________________ + + +EMBED_VERSION_MIN = 0xB011 +EMBED_VERSION_MAX = 0xB0FF + +STDERR = 2 +INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', + ('name', rffi.CCHARP), + ('func', rffi.VOIDP), + ('code', rffi.CCHARP))) + +def load_embedded_cffi_module(space, version, init_struct): + from pypy.module._cffi_backend.cffi1_module import load_cffi1_module + declare_c_function() # translation-time hint only: + # declare _cffi_carefully_make_gil() + # + version = rffi.cast(lltype.Signed, version) + if not (EMBED_VERSION_MIN <= version <= EMBED_VERSION_MAX): + raise oefmt(space.w_ImportError, + "cffi embedded module has got unknown version tag %s", + hex(version)) + # + if space.config.objspace.usemodules.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + # + name = rffi.charp2str(init_struct.name) + load_cffi1_module(space, name, None, init_struct.func) + code = rffi.charp2str(init_struct.code) + compiler = space.createcompiler() + pycode = compiler.compile(code, "" % name, 'exec', 0) + w_globals = space.newdict(module=True) + space.setitem_str(w_globals, "__builtins__", space.wrap(space.builtin)) + pycode.exec_code(space, w_globals, w_globals) + + +class Global: + pass +glob = Global() + +def pypy_init_embedded_cffi_module(version, init_struct): + # called from __init__.py + name = "?" + try: + init_struct = rffi.cast(INITSTRUCTPTR, init_struct) + name = rffi.charp2str(init_struct.name) + # + space = glob.space + must_leave = False + try: + must_leave = space.threadlocals.try_enter_thread(space) + load_embedded_cffi_module(space, version, init_struct) + res = 0 + except OperationError, operr: + operr.write_unraisable(space, "initialization of '%s'" % name, + with_traceback=True) + space.appexec([], r"""(): + import sys + sys.stderr.write('pypy version: %s.%s.%s\n' % + sys.pypy_version_info[:3]) + sys.stderr.write('sys.path: %r\n' % (sys.path,)) + """) + res = -1 + if must_leave: + space.threadlocals.leave_thread(space) + except Exception, e: + # oups! last-level attempt to recover. + try: + os.write(STDERR, "From initialization of '") + os.write(STDERR, name) + os.write(STDERR, "':\n") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + res = -1 + return rffi.cast(rffi.INT, res) + +# ____________________________________________________________ + + +eci = ExternalCompilationInfo(separate_module_sources=[ +r""" +/* XXX Windows missing */ +#include +#include +#include + +RPY_EXPORTED void rpython_startup_code(void); +RPY_EXPORTED int pypy_setup_home(char *, int); + +static unsigned char _cffi_ready = 0; +static const char *volatile _cffi_module_name; + +static void _cffi_init_error(const char *msg, const char *extra) +{ + fprintf(stderr, + "\nPyPy initialization failure when loading module '%s':\n%s%s\n", + _cffi_module_name, msg, extra); +} + +static void _cffi_init(void) +{ + Dl_info info; + char *home; + + rpython_startup_code(); + RPyGilAllocate(); + + if (dladdr(&_cffi_init, &info) == 0) { + _cffi_init_error("dladdr() failed: ", dlerror()); + return; + } + home = realpath(info.dli_fname, NULL); + if (pypy_setup_home(home, 1) != 0) { + _cffi_init_error("pypy_setup_home() failed", ""); + return; + } + _cffi_ready = 1; +} + +RPY_EXPORTED +int pypy_carefully_make_gil(const char *name) +{ + /* For CFFI: this initializes the GIL and loads the home path. + It can be called completely concurrently from unrelated threads. + It assumes that we don't hold the GIL before (if it exists), and we + don't hold it afterwards. + */ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + _cffi_module_name = name; /* not really thread-safe, but better than + nothing */ + pthread_once(&once_control, _cffi_init); + return (int)_cffi_ready - 1; +} +"""]) + +declare_c_function = rffi.llexternal_use_eci(eci) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.5.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/_warnings/interp_warnings.py b/pypy/module/_warnings/interp_warnings.py --- a/pypy/module/_warnings/interp_warnings.py +++ b/pypy/module/_warnings/interp_warnings.py @@ -75,7 +75,7 @@ frame = ec.getnextframe_nohidden(frame) stacklevel -= 1 if frame: - w_globals = frame.w_globals + w_globals = frame.get_w_globals() lineno = frame.get_last_lineno() else: w_globals = space.sys.w_dict diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -30,7 +30,7 @@ currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtins = space.getitem(w_globals, space.wrap('__builtins__')) if not space.isinstance_w(w_builtins, space.w_dict): w_builtins = w_builtins.getdict(space) @@ -54,7 +54,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.w_globals) + return borrow_from(None, caller.get_w_globals()) @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -34,7 +34,7 @@ frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) - py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_globals = make_ref(space, frame.get_w_globals()) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @cpython_api([PyObject], lltype.Void, external=False) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -20,7 +20,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -526,7 +526,7 @@ log = self.run(f) loop, = log.loops_by_filename(self.filepath) call_ops = log.opnames(loop.ops_by_id('call')) - assert call_ops == ['force_token'] # it does not follow inlining + assert call_ops == ['guard_not_invalidated', 'force_token'] # it does not follow inlining # add_ops = log.opnames(loop.ops_by_id('add')) assert add_ops == ['int_add'] @@ -534,9 +534,10 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'int_lt', 'guard_true', + 'guard_not_invalidated', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,8 +72,6 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] @@ -200,6 +198,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ + guard_not_invalidated? i14 = force_token() i16 = force_token() """) @@ -222,7 +221,7 @@ loop, = log.loops_by_id('call') ops = log.opnames(loop.ops_by_id('call')) guards = [ops for ops in ops if ops.startswith('guard')] - assert guards == ["guard_no_overflow"] + assert guards == ["guard_not_invalidated", "guard_no_overflow"] def test_kwargs(self): # this is not a very precise test, could be improved @@ -281,6 +280,7 @@ assert log.result == 13000 loop0, = log.loops_by_id('g1') assert loop0.match_by_id('g1', """ + guard_not_invalidated? i20 = force_token() i22 = int_add_ovf(i8, 3) guard_no_overflow(descr=...) @@ -438,9 +438,6 @@ i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) - guard_value(p7, ConstPtr(ptr25), descr=...) - p26 = getfield_gc_r(p7, descr=) - guard_value(p26, ConstPtr(ptr27), descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) @@ -472,6 +469,7 @@ i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,9 +16,5 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p12 = getfield_gc_r(p10, descr=) - guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc_r(ConstPtr(p17), descr=) - guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -124,7 +124,7 @@ setfield_gc(ConstPtr(ptr39), i59, descr=...) i62 = int_lt(i61, 0) guard_false(i62, descr=...) - jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, p100, descr=...) + jump(..., descr=...) """) def test_mutate_class(self): @@ -183,7 +183,7 @@ setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(ptr42), descr=...) setfield_gc(ConstPtr(ptr69), p77, descr=...) - jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, p100, descr=...) + jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -145,9 +145,9 @@ i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) - i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) + i18 = force_token() i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) @@ -178,9 +178,9 @@ i16 = int_ge(i11, i12) guard_false(i16, descr=...) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) @@ -211,9 +211,9 @@ i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i95 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -23,12 +23,8 @@ i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) - p62 = getfield_gc_r(ConstPtr(ptr37), descr=) setfield_gc(p18, i61, descr=) - guard_value(p62, ConstPtr(ptr39), descr=...) guard_not_invalidated(descr=...) - p64 = getfield_gc_r(ConstPtr(ptr40), descr=) - guard_value(p64, ConstPtr(ptr42), descr=...) p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc_r(p14, descr=) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 5): - py.test.skip("re-enable me in version 1.5") + if __version_info__ < (1, 6): + py.test.skip("re-enable me in version 1.6") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py @@ -54,3 +54,10 @@ content = open(p).read() #v = BACKEND_VERSIONS.get(v, v) assert (('assert __version__ == "%s"' % v) in content) + +def test_embedding_h(): + parent = os.path.dirname(os.path.dirname(cffi.__file__)) + v = cffi.__version__ + p = os.path.join(parent, 'cffi', '_embedding.h') + content = open(p).read() + assert ('cffi version: %s"' % (v,)) in content diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py @@ -1719,3 +1719,10 @@ exec("from _test_import_from_lib.lib import *", d) assert (set(key for key in d if not key.startswith('_')) == set(['myfunc', 'MYFOO'])) + # + # also test "import *" on the module itself, which should be + # equivalent to "import ffi, lib" + d = {} + exec("from _test_import_from_lib import *", d) + assert (sorted([x for x in d.keys() if not x.startswith('__')]) == + ['ffi', 'lib']) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py @@ -60,11 +60,16 @@ if (name.endswith('.so') or name.endswith('.pyd') or name.endswith('.dylib')): found_so = os.path.join(curdir, name) - # foo.cpython-34m.so => foo - name = name.split('.')[0] - # foo_d.so => foo (Python 2 debug builds) + # foo.so => foo + parts = name.split('.') + del parts[-1] + if len(parts) > 1 and parts[-1] != 'bar': + # foo.cpython-34m.so => foo, but foo.bar.so => foo.bar + del parts[-1] + name = '.'.join(parts) + # foo_d => foo (Python 2 debug builds) if name.endswith('_d') and hasattr(sys, 'gettotalrefcount'): - name = name.rsplit('_', 1)[0] + name = name[:-2] name += '.SO' if name.startswith('pycparser') and name.endswith('.egg'): continue # no clue why this shows up sometimes and not others @@ -209,6 +214,58 @@ 'Release': '?'}}) @chdir_to_tmp + def test_api_compile_explicit_target_1(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.*") + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_2(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target=os.path.join("mod_name_in_package", "foo.bar.*")) + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_3(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.baz") + if sys.platform != 'win32': + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + sofile = os.path.join(str(self.udir), 'foo.bar.baz') + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp def test_api_distutils_extension_1(self): ffi = cffi.FFI() ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py @@ -0,0 +1,1 @@ +# Generated by pypy/tool/import_cffi.py diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c @@ -0,0 +1,13 @@ +#include + +extern int add1(int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add1(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py new file mode 100644 From pypy.commits at gmail.com Tue Jan 19 03:27:28 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 00:27:28 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <569df370.e935c20a.2f98b.ffffae75@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r693:d190f74b3a12 Date: 2016-01-19 09:27 +0100 http://bitbucket.org/pypy/pypy.org/changeset/d190f74b3a12/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $62717 of $105000 (59.7%) + $62736 of $105000 (59.7%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Tue Jan 19 03:56:32 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Jan 2016 00:56:32 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: issue while rewriting, missed constant factor that is not multiplied to index Message-ID: <569dfa40.953f1c0a.3b95c.ffffa8e3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81846:69b3adf80fac Date: 2016-01-19 09:55 +0100 http://bitbucket.org/pypy/pypy/changeset/69b3adf80fac/ Log: issue while rewriting, missed constant factor that is not multiplied to index diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -163,11 +163,11 @@ assert isinstance(v, ConstPtr) array_index = moving_obj_tracker.get_array_index(v) - size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) - scale = size + factor, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) + array_index = array_index * factor args = [moving_obj_tracker.const_ptr_gcref_array, ConstInt(array_index), - ConstInt(scale), + ConstInt(1), # already multiplied to array_index ConstInt(offset), ConstInt(size)] load_op = ResOperation(rop.GC_LOAD_INDEXED_R, args) From pypy.commits at gmail.com Tue Jan 19 03:57:18 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Jan 2016 00:57:18 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: translation issue Message-ID: <569dfa6e.878e1c0a.5ceee.ffffb125@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81847:d7a92b0fc07e Date: 2016-01-19 09:56 +0100 http://bitbucket.org/pypy/pypy/changeset/d7a92b0fc07e/ Log: translation issue diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -163,7 +163,8 @@ assert isinstance(v, ConstPtr) array_index = moving_obj_tracker.get_array_index(v) - factor, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) + size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) + factor = size array_index = array_index * factor args = [moving_obj_tracker.const_ptr_gcref_array, ConstInt(array_index), From pypy.commits at gmail.com Tue Jan 19 04:27:49 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Jan 2016 01:27:49 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: ensure the index arg of gc_load_indexed to be in a register (assembler assumed this), adding offset to _rewrite_changeable_constptrs Message-ID: <569e0195.a5c9c20a.a5cff.ffffa00a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81848:8b6b10912648 Date: 2016-01-19 10:26 +0100 http://bitbucket.org/pypy/pypy/changeset/8b6b10912648/ Log: ensure the index arg of gc_load_indexed to be in a register (assembler assumed this), adding offset to _rewrite_changeable_constptrs diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -165,11 +165,11 @@ size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) factor = size - array_index = array_index * factor + array_index = array_index * factor + offset args = [moving_obj_tracker.const_ptr_gcref_array, ConstInt(array_index), ConstInt(1), # already multiplied to array_index - ConstInt(offset), + ConstInt(0), # already added ConstInt(size)] load_op = ResOperation(rop.GC_LOAD_INDEXED_R, args) newops.append(load_op) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -903,7 +903,7 @@ emit_gc_load_r = _emit_gc_load def _emit_gc_load_indexed(self, op, arglocs, regalloc): - result_loc, base_loc, index_loc, offset_loc, size_loc, sign_loc =arglocs + result_loc, base_loc, index_loc, offset_loc, size_loc, sign_loc=arglocs assert not result_loc.is_in_pool() assert not base_loc.is_in_pool() assert not index_loc.is_in_pool() diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -813,7 +813,7 @@ def _prepare_gc_load_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0), force_in_reg=True) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + index_loc = self.ensure_reg(op.getarg(1), force_in_reg=True) scale_box = op.getarg(2) offset_box = op.getarg(3) size_box = op.getarg(4) @@ -979,11 +979,11 @@ return locs def prepare_cond_call_gc_wb(self, op): - arglocs = [self.ensure_reg(op.getarg(0))] + arglocs = [self.ensure_reg(op.getarg(0), force_in_reg=True)] return arglocs def prepare_cond_call_gc_wb_array(self, op): - arglocs = [self.ensure_reg(op.getarg(0)), + arglocs = [self.ensure_reg(op.getarg(0), force_in_reg=True), self.ensure_reg_or_16bit_imm(op.getarg(1)), None] if arglocs[1].is_reg(): From pypy.commits at gmail.com Tue Jan 19 04:58:54 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 01:58:54 -0800 (PST) Subject: [pypy-commit] pypy default: fix link, add more Message-ID: <569e08de.624fc20a.9db1e.ffffce8c@mx.google.com> Author: Armin Rigo Branch: Changeset: r81849:11d889f2493e Date: 2016-01-19 10:57 +0100 http://bitbucket.org/pypy/pypy/changeset/11d889f2493e/ Log: fix link, add more diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -67,8 +67,8 @@ **module** directory contains extension modules written in RPython * **rpython compiler** that resides in ``rpython/annotator`` and - ``rpython/rtyper`` directories. Consult :doc:`introduction to RPython ` for - further reading + ``rpython/rtyper`` directories. Consult `Getting Started with RPython`_ + for further reading * **JIT generator** lives in ``rpython/jit`` directory. optimizations live in ``rpython/jit/metainterp/optimizeopt``, the main JIT in @@ -80,3 +80,14 @@ The rest of directories serve specific niche goal and are unlikely a good entry point. + + +More documentation +------------------ + +* `Getting Started Developing With PyPy`_ + +* `Getting Started with RPython`_ + +.. _`Getting Started Developing With PyPy`: getting-started-dev.html +.. _`Getting started with RPython`: http://rpython.readthedocs.org/en/latest/getting-started.html From pypy.commits at gmail.com Tue Jan 19 05:02:16 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 02:02:16 -0800 (PST) Subject: [pypy-commit] pypy default: typo Message-ID: <569e09a8.ca56c20a.19ef4.ffffcf31@mx.google.com> Author: Armin Rigo Branch: Changeset: r81850:551b7fdef1dd Date: 2016-01-19 11:01 +0100 http://bitbucket.org/pypy/pypy/changeset/551b7fdef1dd/ Log: typo diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -120,6 +120,6 @@ Updated to CFFI 1.5, which supports a new way to do embedding. Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. -.. branch fix-cpython-ssl-tests-2.7 +.. branch: fix-cpython-ssl-tests-2.7 Fix SSL tests by importing cpython's patch From pypy.commits at gmail.com Tue Jan 19 05:41:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 19 Jan 2016 02:41:01 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added overflow test of pool (not yet finished) Message-ID: <569e12bd.cb941c0a.35825.ffffe15f@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81851:6f01de4c9329 Date: 2016-01-19 11:40 +0100 http://bitbucket.org/pypy/pypy/changeset/6f01de4c9329/ Log: added overflow test of pool (not yet finished) fixed test in test_newgc, it did not write the full return value of libffi, thus it left garbage in the return value libffi passes on diff --git a/rpython/jit/backend/zarch/test/test_pool.py b/rpython/jit/backend/zarch/test/test_pool.py --- a/rpython/jit/backend/zarch/test/test_pool.py +++ b/rpython/jit/backend/zarch/test/test_pool.py @@ -1,12 +1,15 @@ +import py from rpython.jit.backend.zarch.pool import LiteralPool from rpython.jit.metainterp.history import (AbstractFailDescr, AbstractDescr, BasicFailDescr, BasicFinalDescr, JitCellToken, TargetToken, ConstInt, ConstPtr, Const, ConstFloat) from rpython.jit.metainterp.resoperation import (ResOperation, rop, InputArgInt) +from rpython.jit.backend.zarch.codebuilder import InstrBuilder from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.jit.backend.zarch.helper.regalloc import check_imm32 -import py +from rpython.jit.backend.zarch.assembler import AssemblerZARCH +from rpython.jit.backend.detect_cpu import getcpuclass class TestPoolZARCH(object): def setup_class(self): @@ -47,3 +50,10 @@ assert self.const_in_pool(c2) else: assert not self.const_in_pool(c2) + + def test_pool_overflow(self): + cpu = getcpuclass()(None, None) + cpu.setup_once() + ops = [ResOperation(rop.FLOAT_ADD, [ConstFloat(0.0125), ConstFloat(float(i))]) for i in range(100)] + cpu.compile_loop([], ops, JitCellToken()) + diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -695,11 +695,15 @@ p_a2 = rffi.cast(rffi.VOIDPP, ll_args[1])[0] a1 = rffi.cast(rffi.SIGNEDP, p_a1)[0] a2 = rffi.cast(rffi.SIGNEDP, p_a2)[0] - res = rffi.cast(rffi.INTP, ll_res) + # related to libffi issue on s390x, we MUST + # overwrite the full ffi result which is 64 bit + # if not, this leaves garbage in the return value + # and qsort does not sorrt correctly + res = rffi.cast(rffi.SIGNEDP, ll_res) if a1 > a2: - res[0] = rffi.cast(rffi.INT, 1) + res[0] = 1 else: - res[0] = rffi.cast(rffi.INT, -1) + res[0] = -1 def f(): libc = CDLL(get_libc_name()) @@ -707,7 +711,7 @@ ffi_size_t, ffi_type_pointer], ffi_type_void) - ptr = CallbackFuncPtr([ffi_type_pointer, ffi_type_pointer], + ptr = CallbackFuncPtr([ffi_type_pointer, ffi_type_pointer, ffi_type_pointer], ffi_type_sint, callback) TP = rffi.CArray(lltype.Signed) From pypy.commits at gmail.com Tue Jan 19 09:30:30 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 06:30:30 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: Restarting the cpyext-gc-support branch from scratch Message-ID: <569e4886.84e31c0a.70bdc.29ab@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81852:371806b0659b Date: 2016-01-19 14:46 +0100 http://bitbucket.org/pypy/pypy/changeset/371806b0659b/ Log: Restarting the cpyext-gc-support branch from scratch (will copy a lot of code from the old one) From pypy.commits at gmail.com Tue Jan 19 09:30:34 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 06:30:34 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: updates Message-ID: <569e488a.a5c9c20a.a5cff.0cf1@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81854:05cd68cd7259 Date: 2016-01-19 15:01 +0100 http://bitbucket.org/pypy/pypy/changeset/05cd68cd7259/ Log: updates diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst --- a/pypy/doc/discussion/rawrefcount.rst +++ b/pypy/doc/discussion/rawrefcount.rst @@ -67,8 +67,8 @@ Let P = list of links created with rawrefcount.create_link_pypy() and O = list of links created with rawrefcount.create_link_pyobj(). The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all -the data is in the PyObjects, and all references (if any) are regular -CPython-like reference counts. +the data is in the PyObjects, and all outsite references (if any) are +in C, as "PyObject *" fields. So, during the collection we do this about P links: @@ -81,7 +81,7 @@ this: for (p, ob) in P + O: - if p is not surviving: + if p is not surviving: # even if 'ob' might be surviving unlink p and ob if ob->ob_refcnt == REFCNT_FROM_PYPY_LIGHT: free(ob) @@ -106,6 +106,11 @@ Further notes ------------- +XXX +XXX the rest is the ideal world, but as a first step, we'll look +XXX for the minimal tweaks needed to adapt the existing cpyext +XXX + For objects that are opaque in CPython, like , we always create a PyPy object, and then when needed we make an empty PyObject and attach it with create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. From pypy.commits at gmail.com Tue Jan 19 09:30:32 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 06:30:32 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: copy the basic GC support Message-ID: <569e4888.878e1c0a.5ceee.283b@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81853:567f4931007c Date: 2016-01-19 15:01 +0100 http://bitbucket.org/pypy/pypy/changeset/567f4931007c/ Log: copy the basic GC support diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/rawrefcount.rst @@ -0,0 +1,153 @@ +====================== +Rawrefcount and the GC +====================== + + +GC Interface +------------ + +"PyObject" is a raw structure with at least two fields, ob_refcnt and +ob_pypy_link. The ob_refcnt is the reference counter as used on +CPython. If the PyObject structure is linked to a live PyPy object, +its current address is stored in ob_pypy_link and ob_refcnt is bumped +by either the constant REFCNT_FROM_PYPY, or the constant +REFCNT_FROM_PYPY_LIGHT (== REFCNT_FROM_PYPY + SOME_HUGE_VALUE) +(to mean "light finalizer"). + +Most PyPy objects exist outside cpyext, and conversely in cpyext it is +possible that a lot of PyObjects exist without being seen by the rest +of PyPy. At the interface, however, we can "link" a PyPy object and a +PyObject. There are two kinds of link: + +rawrefcount.create_link_pypy(p, ob) + + Makes a link between an exising object gcref 'p' and a newly + allocated PyObject structure 'ob'. ob->ob_refcnt must be + initialized to either REFCNT_FROM_PYPY, or + REFCNT_FROM_PYPY_LIGHT. (The second case is an optimization: + when the GC finds the PyPy object and PyObject no longer + referenced, it can just free() the PyObject.) + +rawrefcount.create_link_pyobj(p, ob) + + Makes a link from an existing PyObject structure 'ob' to a newly + allocated W_CPyExtPlaceHolderObject 'p'. You must also add + REFCNT_FROM_PYPY to ob->ob_refcnt. For cases where the PyObject + contains all the data, and the PyPy object is just a proxy. The + W_CPyExtPlaceHolderObject should have only a field that contains + the address of the PyObject, but that's outside the scope of the + GC. + +rawrefcount.from_obj(p) + + If there is a link from object 'p' made with create_link_pypy(), + returns the corresponding 'ob'. Otherwise, returns NULL. + +rawrefcount.to_obj(Class, ob) + + Returns ob->ob_pypy_link, cast to an instance of 'Class'. + + +Collection logic +---------------- + +Objects existing purely on the C side have ob->ob_pypy_link == 0; +these are purely reference counted. On the other hand, if +ob->ob_pypy_link != 0, then ob->ob_refcnt is at least REFCNT_FROM_PYPY +and the object is part of a "link". + +The idea is that links whose 'p' is not reachable from other PyPy +objects *and* whose 'ob->ob_refcnt' is REFCNT_FROM_PYPY or +REFCNT_FROM_PYPY_LIGHT are the ones who die. But it is more messy +because PyObjects still (usually) need to have a tp_dealloc called, +and this cannot occur immediately (and can do random things like +accessing other references this object points to, or resurrecting the +object). + +Let P = list of links created with rawrefcount.create_link_pypy() +and O = list of links created with rawrefcount.create_link_pyobj(). +The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all +the data is in the PyObjects, and all references (if any) are regular +CPython-like reference counts. + +So, during the collection we do this about P links: + + for (p, ob) in P: + if ob->ob_refcnt != REFCNT_FROM_PYPY + and ob->ob_refcnt != REFCNT_FROM_PYPY_LIGHT: + mark 'p' as surviving, as well as all its dependencies + +At the end of the collection, the P and O links are both handled like +this: + + for (p, ob) in P + O: + if p is not surviving: + unlink p and ob + if ob->ob_refcnt == REFCNT_FROM_PYPY_LIGHT: + free(ob) + elif ob->ob_refcnt > REFCNT_FROM_PYPY_LIGHT: + ob->ob_refcnt -= REFCNT_FROM_PYPY_LIGHT + else: + ob->ob_refcnt -= REFCNT_FROM_PYPY + if ob->ob_refcnt == 0: + invoke _Py_Dealloc(ob) later, outside the GC + + +GC Implementation +----------------- + +We need two copies of both the P list and O list, for young or old +objects. All four lists can be regular AddressLists of 'ob' objects. + +We also need an AddressDict mapping 'p' to 'ob' for all links in the P +list, and update it when PyPy objects move. + + +Further notes +------------- + +For objects that are opaque in CPython, like , we always create +a PyPy object, and then when needed we make an empty PyObject and +attach it with create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. + +For and objects, the corresponding PyObjects contain a +"long" or "double" field too. We link them with create_link_pypy() +and we can use REFCNT_FROM_PYPY_LIGHT too: 'tp_dealloc' doesn't +need to be called, and instead just calling free() is fine. + +For objects, we need both a PyPy and a PyObject side. These +are made with create_link_pypy()/REFCNT_FROM_PYPY. + +For custom PyXxxObjects allocated from the C extension module, we +need create_link_pyobj(). + +For or objects coming from PyPy, we use +create_link_pypy()/REFCNT_FROM_PYPY_LIGHT with a PyObject +preallocated with the size of the string. We copy the string +lazily into that area if PyString_AS_STRING() is called. + +For , , or objects in the C extension +module, we first allocate it as only a PyObject, which supports +mutation of the data from C, like CPython. When it is exported to +PyPy we could make a W_CPyExtPlaceHolderObject with +create_link_pyobj(). + +For objects coming from PyPy, if they are not specialized, +then the PyPy side holds a regular reference to the items. Then we +can allocate a PyTupleObject and store in it borrowed PyObject +pointers to the items. Such a case is created with +create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. If it is specialized, +then it doesn't work because the items are created just-in-time on the +PyPy side. In this case, the PyTupleObject needs to hold real +references to the PyObject items, and we use create_link_pypy()/ +REFCNT_FROM_PYPY. In all cases, we have a C array of PyObjects +that we can directly return from PySequence_Fast_ITEMS, PyTuple_ITEMS, +PyTuple_GetItem, and so on. + +For objects coming from PyPy, we can use a cpyext list +strategy. The list turns into a PyListObject, as if it had been +allocated from C in the first place. The special strategy can hold +(only) a direct reference to the PyListObject, and we can use either +create_link_pyobj() or create_link_pypy() (to be decided). +PySequence_Fast_ITEMS then works for lists too, and PyList_GetItem +can return a borrowed reference, and so on. diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -706,6 +706,7 @@ self.major_collection_step() else: self.minor_and_major_collection() + self.rrc_invoke_callback() def collect_and_reserve(self, totalsize): @@ -783,12 +784,15 @@ self.threshold_reached()): # ^^but only if still self.minor_collection() # the same collection self.major_collection_step() - # - # The nursery might not be empty now, because of - # execute_finalizers(). If it is almost full again, - # we need to fix it with another call to minor_collection(). - if self.nursery_free + totalsize > self.nursery_top: - self.minor_collection() + # + self.rrc_invoke_callback() + # + # The nursery might not be empty now, because of + # execute_finalizers() or rrc_invoke_callback(). + # If it is almost full again, + # we need to fix it with another call to minor_collection(). + if self.nursery_free + totalsize > self.nursery_top: + self.minor_collection() # else: ll_assert(minor_collection_count == 2, @@ -861,6 +865,7 @@ if self.threshold_reached(raw_malloc_usage(totalsize) + self.nursery_size // 2): self.major_collection_step(raw_malloc_usage(totalsize)) + self.rrc_invoke_callback() # note that this loop should not be infinite: when the # last step of a major collection is done but # threshold_reached(totalsize) is still true, then @@ -1080,35 +1085,19 @@ "odd-valued (i.e. tagged) pointer unexpected here") return self.nursery <= addr < self.nursery + self.nursery_size - def appears_to_be_young(self, addr): - # "is a valid addr to a young object?" - # but it's ok to occasionally return True accidentally. - # Maybe the best implementation would be a bloom filter - # of some kind instead of the dictionary lookup that is - # sometimes done below. But the expected common answer - # is "Yes" because addr points to the nursery, so it may - # not be useful to optimize the other case too much. - # - # First, if 'addr' appears to be a pointer to some place within - # the nursery, return True - if not self.translated_to_c: - # When non-translated, filter out tagged pointers explicitly. - # When translated, it may occasionally give a wrong answer - # of True if 'addr' is a tagged pointer with just the wrong value. - if not self.is_valid_gc_object(addr): - return False - + def is_young_object(self, addr): + # Check if the object at 'addr' is young. + if not self.is_valid_gc_object(addr): + return False # filter out tagged pointers explicitly. if self.nursery <= addr < self.nursery_top: return True # addr is in the nursery - # # Else, it may be in the set 'young_rawmalloced_objects' return (bool(self.young_rawmalloced_objects) and self.young_rawmalloced_objects.contains(addr)) - appears_to_be_young._always_inline_ = True def debug_is_old_object(self, addr): return (self.is_valid_gc_object(addr) - and not self.appears_to_be_young(addr)) + and not self.is_young_object(addr)) def is_forwarded(self, obj): """Returns True if the nursery obj is marked as forwarded. @@ -1618,6 +1607,10 @@ self._visit_old_objects_pointing_to_pinned, None) current_old_objects_pointing_to_pinned.delete() # + # visit the P list from rawrefcount, if enabled. + if self.rrc_enabled: + self.rrc_minor_collection_trace() + # while True: # If we are using card marking, do a partial trace of the arrays # that are flagged with GCFLAG_CARDS_SET. @@ -1666,6 +1659,10 @@ if self.young_rawmalloced_objects: self.free_young_rawmalloced_objects() # + # visit the P and O lists from rawrefcount, if enabled. + if self.rrc_enabled: + self.rrc_minor_collection_free() + # # All live nursery objects are out of the nursery or pinned inside # the nursery. Create nursery barriers to protect the pinned objects, # fill the rest of the nursery with zeros and reset the current nursery @@ -2178,9 +2175,13 @@ # finalizers/weak references are rare and short which means that # they do not need a separate state and do not need to be # made incremental. + # For now, the same applies to rawrefcount'ed objects. if (not self.objects_to_trace.non_empty() and not self.more_objects_to_trace.non_empty()): # + if self.rrc_enabled: + self.rrc_major_collection_trace() + # if self.objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() elif self.old_objects_with_weakrefs.non_empty(): @@ -2215,6 +2216,10 @@ self.old_objects_pointing_to_pinned = \ new_old_objects_pointing_to_pinned self.updated_old_objects_pointing_to_pinned = True + # + if self.rrc_enabled: + self.rrc_major_collection_free() + # self.gc_state = STATE_SWEEPING #END MARKING elif self.gc_state == STATE_SWEEPING: @@ -2745,3 +2750,234 @@ (obj + offset).address[0] = llmemory.NULL self.old_objects_with_weakrefs.delete() self.old_objects_with_weakrefs = new_with_weakref + + + # ---------- + # RawRefCount + + rrc_enabled = False + + _ADDRARRAY = lltype.Array(llmemory.Address, hints={'nolength': True}) + PYOBJ_HDR = lltype.Struct('GCHdr_PyObject', + ('ob_refcnt', lltype.Signed), + ('ob_pypy_link', lltype.Signed)) + PYOBJ_HDR_PTR = lltype.Ptr(PYOBJ_HDR) + RAWREFCOUNT_DEALLOC_TRIGGER = lltype.Ptr(lltype.FuncType([], lltype.Void)) + + def _pyobj(self, pyobjaddr): + return llmemory.cast_adr_to_ptr(pyobjaddr, self.PYOBJ_HDR_PTR) + + def rawrefcount_init(self, dealloc_trigger_callback): + # see pypy/doc/discussion/rawrefcount.rst + if not self.rrc_enabled: + self.rrc_p_list_young = self.AddressStack() + self.rrc_p_list_old = self.AddressStack() + self.rrc_o_list_young = self.AddressStack() + self.rrc_o_list_old = self.AddressStack() + self.rrc_p_dict = self.AddressDict() # non-nursery keys only + self.rrc_p_dict_nurs = self.AddressDict() # nursery keys only + p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw', + track_allocation=False) + self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p) + self.rrc_dealloc_trigger_callback = dealloc_trigger_callback + self.rrc_dealloc_pending = self.AddressStack() + self.rrc_enabled = True + + def check_no_more_rawrefcount_state(self): + "NOT_RPYTHON: for tests" + assert self.rrc_p_list_young.length() == 0 + assert self.rrc_p_list_old .length() == 0 + assert self.rrc_o_list_young.length() == 0 + assert self.rrc_o_list_old .length() == 0 + def check_value_is_null(key, value, ignore): + assert value == llmemory.NULL + self.rrc_p_dict.foreach(check_value_is_null, None) + self.rrc_p_dict_nurs.foreach(check_value_is_null, None) + + def rawrefcount_create_link_pypy(self, gcobj, pyobject): + ll_assert(self.rrc_enabled, "rawrefcount.init not called") + obj = llmemory.cast_ptr_to_adr(gcobj) + objint = llmemory.cast_adr_to_int(obj, "symbolic") + self._pyobj(pyobject).ob_pypy_link = objint + # + lst = self.rrc_p_list_young + if self.is_in_nursery(obj): + dct = self.rrc_p_dict_nurs + else: + dct = self.rrc_p_dict + if not self.is_young_object(obj): + lst = self.rrc_p_list_old + lst.append(pyobject) + dct.setitem(obj, pyobject) + + def rawrefcount_create_link_pyobj(self, gcobj, pyobject): + ll_assert(self.rrc_enabled, "rawrefcount.init not called") + obj = llmemory.cast_ptr_to_adr(gcobj) + if self.is_young_object(obj): + self.rrc_o_list_young.append(pyobject) + else: + self.rrc_o_list_old.append(pyobject) + objint = llmemory.cast_adr_to_int(obj, "symbolic") + self._pyobj(pyobject).ob_pypy_link = objint + # there is no rrc_o_dict + + def rawrefcount_from_obj(self, gcobj): + obj = llmemory.cast_ptr_to_adr(gcobj) + if self.is_in_nursery(obj): + dct = self.rrc_p_dict_nurs + else: + dct = self.rrc_p_dict + return dct.get(obj) + + def rawrefcount_to_obj(self, pyobject): + obj = llmemory.cast_int_to_adr(self._pyobj(pyobject).ob_pypy_link) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def rawrefcount_next_dead(self): + if self.rrc_dealloc_pending.non_empty(): + return self.rrc_dealloc_pending.pop() + return llmemory.NULL + + + def rrc_invoke_callback(self): + if self.rrc_enabled and self.rrc_dealloc_pending.non_empty(): + self.rrc_dealloc_trigger_callback() + + def rrc_minor_collection_trace(self): + length_estimate = self.rrc_p_dict_nurs.length() + self.rrc_p_dict_nurs.delete() + self.rrc_p_dict_nurs = self.AddressDict(length_estimate) + self.rrc_p_list_young.foreach(self._rrc_minor_trace, + self.rrc_singleaddr) + + def _rrc_minor_trace(self, pyobject, singleaddr): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc == REFCNT_FROM_PYPY or rc == REFCNT_FROM_PYPY_LIGHT: + pass # the corresponding object may die + else: + # force the corresponding object to be alive + intobj = self._pyobj(pyobject).ob_pypy_link + singleaddr.address[0] = llmemory.cast_int_to_adr(intobj) + self._trace_drag_out(singleaddr, llmemory.NULL) + + def rrc_minor_collection_free(self): + ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 1") + lst = self.rrc_p_list_young + while lst.non_empty(): + self._rrc_minor_free(lst.pop(), self.rrc_p_list_old, + self.rrc_p_dict) + lst = self.rrc_o_list_young + no_o_dict = self.null_address_dict() + while lst.non_empty(): + self._rrc_minor_free(lst.pop(), self.rrc_o_list_old, + no_o_dict) + + def _rrc_minor_free(self, pyobject, surviving_list, surviving_dict): + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + if self.is_in_nursery(obj): + if self.is_forwarded(obj): + # Common case: survives and moves + obj = self.get_forwarding_address(obj) + intobj = llmemory.cast_adr_to_int(obj, "symbolic") + self._pyobj(pyobject).ob_pypy_link = intobj + surviving = True + if surviving_dict: + # Surviving nursery object: was originally in + # rrc_p_dict_nurs and now must be put into rrc_p_dict + surviving_dict.setitem(obj, pyobject) + else: + surviving = False + elif (bool(self.young_rawmalloced_objects) and + self.young_rawmalloced_objects.contains(obj)): + # young weakref to a young raw-malloced object + if self.header(obj).tid & GCFLAG_VISITED_RMY: + surviving = True # survives, but does not move + else: + surviving = False + if surviving_dict: + # Dying young large object: was in rrc_p_dict, + # must be deleted + surviving_dict.setitem(obj, llmemory.NULL) + else: + ll_assert(False, "rrc_X_list_young contains non-young obj") + return + # + if surviving: + surviving_list.append(pyobject) + else: + self._rrc_free(pyobject) + + def _rrc_free(self, pyobject): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc >= REFCNT_FROM_PYPY_LIGHT: + rc -= REFCNT_FROM_PYPY_LIGHT + if rc == 0: + lltype.free(self._pyobj(pyobject), flavor='raw') + else: + # can only occur if LIGHT is used in create_link_pyobj() + self._pyobj(pyobject).ob_refcnt = rc + self._pyobj(pyobject).ob_pypy_link = 0 + else: + ll_assert(rc >= REFCNT_FROM_PYPY, "refcount underflow?") + ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99), + "refcount underflow from REFCNT_FROM_PYPY_LIGHT?") + rc -= REFCNT_FROM_PYPY + self._pyobj(pyobject).ob_refcnt = rc + self._pyobj(pyobject).ob_pypy_link = 0 + if rc == 0: + self.rrc_dealloc_pending.append(pyobject) + _rrc_free._always_inline_ = True + + def rrc_major_collection_trace(self): + self.rrc_p_list_old.foreach(self._rrc_major_trace, None) + + def _rrc_major_trace(self, pyobject, ignore): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc == REFCNT_FROM_PYPY or rc == REFCNT_FROM_PYPY_LIGHT: + pass # the corresponding object may die + else: + # force the corresponding object to be alive + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + self.objects_to_trace.append(obj) + self.visit_all_objects() + + def rrc_major_collection_free(self): + ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 2") + length_estimate = self.rrc_p_dict.length() + self.rrc_p_dict.delete() + self.rrc_p_dict = new_p_dict = self.AddressDict(length_estimate) + new_p_list = self.AddressStack() + while self.rrc_p_list_old.non_empty(): + self._rrc_major_free(self.rrc_p_list_old.pop(), new_p_list, + new_p_dict) + self.rrc_p_list_old.delete() + self.rrc_p_list_old = new_p_list + # + new_o_list = self.AddressStack() + no_o_dict = self.null_address_dict() + while self.rrc_o_list_old.non_empty(): + self._rrc_major_free(self.rrc_o_list_old.pop(), new_o_list, + no_o_dict) + self.rrc_o_list_old.delete() + self.rrc_o_list_old = new_o_list + + def _rrc_major_free(self, pyobject, surviving_list, surviving_dict): + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + if self.header(obj).tid & GCFLAG_VISITED: + surviving_list.append(pyobject) + if surviving_dict: + surviving_dict.insertclean(obj, pyobject) + else: + self._rrc_free(pyobject) diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py new file mode 100644 --- /dev/null +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -0,0 +1,270 @@ +import py +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.memory.gc.incminimark import IncrementalMiniMarkGC +from rpython.memory.gc.test.test_direct import BaseDirectGCTest +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + +PYOBJ_HDR = IncrementalMiniMarkGC.PYOBJ_HDR +PYOBJ_HDR_PTR = IncrementalMiniMarkGC.PYOBJ_HDR_PTR + +S = lltype.GcForwardReference() +S.become(lltype.GcStruct('S', + ('x', lltype.Signed), + ('prev', lltype.Ptr(S)), + ('next', lltype.Ptr(S)))) + + +class TestRawRefCount(BaseDirectGCTest): + GCClass = IncrementalMiniMarkGC + + def _collect(self, major, expected_trigger=0): + if major: + self.gc.collect() + else: + self.gc.minor_collection() + count1 = len(self.trigger) + self.gc.rrc_invoke_callback() + count2 = len(self.trigger) + assert count2 - count1 == expected_trigger + + def _rawrefcount_pair(self, intval, is_light=False, is_pyobj=False, + create_old=False): + if is_light: + rc = REFCNT_FROM_PYPY_LIGHT + else: + rc = REFCNT_FROM_PYPY + self.trigger = [] + self.gc.rawrefcount_init(lambda: self.trigger.append(1)) + # + p1 = self.malloc(S) + p1.x = intval + if create_old: + self.stackroots.append(p1) + self._collect(major=False) + p1 = self.stackroots.pop() + p1ref = lltype.cast_opaque_ptr(llmemory.GCREF, p1) + r1 = lltype.malloc(PYOBJ_HDR, flavor='raw') + r1.ob_refcnt = rc + r1.ob_pypy_link = 0 + r1addr = llmemory.cast_ptr_to_adr(r1) + if is_pyobj: + assert not is_light + self.gc.rawrefcount_create_link_pyobj(p1ref, r1addr) + else: + self.gc.rawrefcount_create_link_pypy(p1ref, r1addr) + assert r1.ob_refcnt == rc + assert r1.ob_pypy_link != 0 + + def check_alive(extra_refcount): + assert r1.ob_refcnt == rc + extra_refcount + assert r1.ob_pypy_link != 0 + p1ref = self.gc.rawrefcount_to_obj(r1addr) + p1 = lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref) + assert p1.x == intval + if not is_pyobj: + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + else: + assert self.gc.rawrefcount_from_obj(p1ref) == llmemory.NULL + return p1 + return p1, p1ref, r1, r1addr, check_alive + + def test_rawrefcount_objects_basic(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=True, create_old=old)) + p2 = self.malloc(S) + p2.x = 84 + p2ref = lltype.cast_opaque_ptr(llmemory.GCREF, p2) + r2 = lltype.malloc(PYOBJ_HDR, flavor='raw') + r2.ob_refcnt = 1 + r2.ob_pypy_link = 0 + r2addr = llmemory.cast_ptr_to_adr(r2) + # p2 and r2 are not linked + assert r1.ob_pypy_link != 0 + assert r2.ob_pypy_link == 0 + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + assert self.gc.rawrefcount_from_obj(p2ref) == llmemory.NULL + assert self.gc.rawrefcount_to_obj(r1addr) == p1ref + assert self.gc.rawrefcount_to_obj(r2addr) == lltype.nullptr( + llmemory.GCREF.TO) + lltype.free(r1, flavor='raw') + lltype.free(r2, flavor='raw') + + def test_rawrefcount_objects_collection_survives_from_raw(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=True, create_old=old)) + check_alive(0) + r1.ob_refcnt += 1 + self._collect(major=False) + check_alive(+1) + self._collect(major=True) + check_alive(+1) + r1.ob_refcnt -= 1 + self._collect(major=False) + p1 = check_alive(0) + self._collect(major=True) + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead + self.gc.check_no_more_rawrefcount_state() + assert self.trigger == [] + assert self.gc.rawrefcount_next_dead() == llmemory.NULL + + def test_rawrefcount_dies_quickly(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=True, create_old=old)) + check_alive(0) + self._collect(major=False) + if old: + check_alive(0) + self._collect(major=True) + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead + self.gc.check_no_more_rawrefcount_state() + + def test_rawrefcount_objects_collection_survives_from_obj(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=True, create_old=old)) + check_alive(0) + self.stackroots.append(p1) + self._collect(major=False) + check_alive(0) + self._collect(major=True) + check_alive(0) + p1 = self.stackroots.pop() + self._collect(major=False) + check_alive(0) + assert p1.x == 42 + self._collect(major=True) + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead + self.gc.check_no_more_rawrefcount_state() + + def test_rawrefcount_objects_basic_old(self): + self.test_rawrefcount_objects_basic(old=True) + def test_rawrefcount_objects_collection_survives_from_raw_old(self): + self.test_rawrefcount_objects_collection_survives_from_raw(old=True) + def test_rawrefcount_dies_quickly_old(self): + self.test_rawrefcount_dies_quickly(old=True) + def test_rawrefcount_objects_collection_survives_from_obj_old(self): + self.test_rawrefcount_objects_collection_survives_from_obj(old=True) + + def test_pypy_nonlight_survives_from_raw(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=False, create_old=old)) + check_alive(0) + r1.ob_refcnt += 1 + self._collect(major=False) + check_alive(+1) + self._collect(major=True) + check_alive(+1) + r1.ob_refcnt -= 1 + self._collect(major=False) + p1 = check_alive(0) + self._collect(major=True, expected_trigger=1) + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 0 + assert r1.ob_pypy_link == 0 + assert self.gc.rawrefcount_next_dead() == r1addr + assert self.gc.rawrefcount_next_dead() == llmemory.NULL + assert self.gc.rawrefcount_next_dead() == llmemory.NULL + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pypy_nonlight_survives_from_obj(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=False, create_old=old)) + check_alive(0) + self.stackroots.append(p1) + self._collect(major=False) + check_alive(0) + self._collect(major=True) + check_alive(0) + p1 = self.stackroots.pop() + self._collect(major=False) + check_alive(0) + assert p1.x == 42 + self._collect(major=True, expected_trigger=1) + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 0 + assert r1.ob_pypy_link == 0 + assert self.gc.rawrefcount_next_dead() == r1addr + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pypy_nonlight_dies_quickly(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=False, create_old=old)) + check_alive(0) + if old: + self._collect(major=False) + check_alive(0) + self._collect(major=True, expected_trigger=1) + else: + self._collect(major=False, expected_trigger=1) + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 0 + assert r1.ob_pypy_link == 0 + assert self.gc.rawrefcount_next_dead() == r1addr + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pypy_nonlight_survives_from_raw_old(self): + self.test_pypy_nonlight_survives_from_raw(old=True) + def test_pypy_nonlight_survives_from_obj_old(self): + self.test_pypy_nonlight_survives_from_obj(old=True) + def test_pypy_nonlight_dies_quickly_old(self): + self.test_pypy_nonlight_dies_quickly(old=True) + + def test_pyobject_pypy_link_dies_on_minor_collection(self): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True)) + check_alive(0) + r1.ob_refcnt += 1 # the pyobject is kept alive + self._collect(major=False) + assert r1.ob_refcnt == 1 # refcnt dropped to 1 + assert r1.ob_pypy_link == 0 # detached + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pyobject_dies(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + check_alive(0) + if old: + self._collect(major=False) + check_alive(0) + self._collect(major=True, expected_trigger=1) + else: + self._collect(major=False, expected_trigger=1) + assert r1.ob_refcnt == 0 # refcnt dropped to 0 + assert r1.ob_pypy_link == 0 # detached + assert self.gc.rawrefcount_next_dead() == r1addr + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pyobject_survives_from_obj(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + check_alive(0) + self.stackroots.append(p1) + self._collect(major=False) + check_alive(0) + self._collect(major=True) + check_alive(0) + p1 = self.stackroots.pop() + self._collect(major=False) + check_alive(0) + assert p1.x == 42 + assert self.trigger == [] + self._collect(major=True, expected_trigger=1) + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 0 + assert r1.ob_pypy_link == 0 + assert self.gc.rawrefcount_next_dead() == r1addr + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pyobject_dies_old(self): + self.test_pyobject_dies(old=True) + def test_pyobject_survives_from_obj_old(self): + self.test_pyobject_survives_from_obj(old=True) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -153,6 +153,7 @@ else: # for regular translation: pick the GC from the config GCClass, GC_PARAMS = choose_gc_from_config(translator.config) + self.GCClass = GCClass if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] @@ -483,6 +484,29 @@ annmodel.SomeInteger(nonneg=True)], annmodel.s_None) + if hasattr(GCClass, 'rawrefcount_init'): + self.rawrefcount_init_ptr = getfn( + GCClass.rawrefcount_init, + [s_gc, SomePtr(GCClass.RAWREFCOUNT_DEALLOC_TRIGGER)], + annmodel.s_None) + self.rawrefcount_create_link_pypy_ptr = getfn( + GCClass.rawrefcount_create_link_pypy, + [s_gc, s_gcref, SomeAddress()], + annmodel.s_None) + self.rawrefcount_create_link_pyobj_ptr = getfn( + GCClass.rawrefcount_create_link_pyobj, + [s_gc, s_gcref, SomeAddress()], + annmodel.s_None) + self.rawrefcount_from_obj_ptr = getfn( + GCClass.rawrefcount_from_obj, [s_gc, s_gcref], SomeAddress(), + inline = True) + self.rawrefcount_to_obj_ptr = getfn( + GCClass.rawrefcount_to_obj, [s_gc, SomeAddress()], s_gcref, + inline = True) + self.rawrefcount_next_dead_ptr = getfn( + GCClass.rawrefcount_next_dead, [s_gc], SomeAddress(), + inline = True) + if GCClass.can_usually_pin_objects: self.pin_ptr = getfn(GCClass.pin, [s_gc, SomeAddress()], @@ -1228,6 +1252,50 @@ resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) + def gct_gc_rawrefcount_init(self, hop): + [v_fnptr] = hop.spaceop.args + assert v_fnptr.concretetype == self.GCClass.RAWREFCOUNT_DEALLOC_TRIGGER + hop.genop("direct_call", + [self.rawrefcount_init_ptr, self.c_const_gc, v_fnptr]) + + def gct_gc_rawrefcount_create_link_pypy(self, hop): + [v_gcobj, v_pyobject] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert v_pyobject.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_create_link_pypy_ptr, self.c_const_gc, + v_gcobj, v_pyobject]) + + def gct_gc_rawrefcount_create_link_pyobj(self, hop): + [v_gcobj, v_pyobject] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert v_pyobject.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_create_link_pyobj_ptr, self.c_const_gc, + v_gcobj, v_pyobject]) + + def gct_gc_rawrefcount_from_obj(self, hop): + [v_gcobj] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert hop.spaceop.result.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_from_obj_ptr, self.c_const_gc, v_gcobj], + resultvar=hop.spaceop.result) + + def gct_gc_rawrefcount_to_obj(self, hop): + [v_pyobject] = hop.spaceop.args + assert v_pyobject.concretetype == llmemory.Address + assert hop.spaceop.result.concretetype == llmemory.GCREF + hop.genop("direct_call", + [self.rawrefcount_to_obj_ptr, self.c_const_gc, v_pyobject], + resultvar=hop.spaceop.result) + + def gct_gc_rawrefcount_next_dead(self, hop): + assert hop.spaceop.result.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_next_dead_ptr, self.c_const_gc], + resultvar=hop.spaceop.result) + def _set_into_gc_array_part(self, op): if op.opname == 'setarrayitem': return op.args[1] diff --git a/rpython/rlib/exports.py b/rpython/rlib/exports.py --- a/rpython/rlib/exports.py +++ b/rpython/rlib/exports.py @@ -1,5 +1,7 @@ from rpython.rtyper.lltypesystem.lltype import typeOf, ContainerType +# XXX kill me + def export_struct(name, struct): assert name not in EXPORTS_names, "Duplicate export " + name assert isinstance(typeOf(struct), ContainerType) diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rawrefcount.py @@ -0,0 +1,262 @@ +# +# See documentation in pypy/doc/discussion/rawrefcount.rst +# +import sys, weakref +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rlib.objectmodel import we_are_translated, specialize +from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.rlib import rgc + + +REFCNT_FROM_PYPY = 80 +REFCNT_FROM_PYPY_LIGHT = REFCNT_FROM_PYPY + (sys.maxint//2+1) + +RAWREFCOUNT_DEALLOC_TRIGGER = lltype.Ptr(lltype.FuncType([], lltype.Void)) + + +def _build_pypy_link(p): + res = len(_adr2pypy) + _adr2pypy.append(p) + return res + + +def init(dealloc_trigger_callback=None): + """NOT_RPYTHON: set up rawrefcount with the GC. This is only used + for tests; it should not be called at all during translation. + """ + global _p_list, _o_list, _adr2pypy, _pypy2ob + global _d_list, _dealloc_trigger_callback + _p_list = [] + _o_list = [] + _adr2pypy = [None] + _pypy2ob = {} + _d_list = [] + _dealloc_trigger_callback = dealloc_trigger_callback + +def create_link_pypy(p, ob): + "NOT_RPYTHON: a link where the PyPy object contains some or all the data" + #print 'create_link_pypy\n\t%s\n\t%s' % (p, ob) + assert p not in _pypy2ob + #assert not ob.c_ob_pypy_link + ob.c_ob_pypy_link = _build_pypy_link(p) + _pypy2ob[p] = ob + _p_list.append(ob) + +def create_link_pyobj(p, ob): + """NOT_RPYTHON: a link where the PyObject contains all the data. + from_obj() will not work on this 'p'.""" + #print 'create_link_pyobj\n\t%s\n\t%s' % (p, ob) + assert p not in _pypy2ob + #assert not ob.c_ob_pypy_link + ob.c_ob_pypy_link = _build_pypy_link(p) + _o_list.append(ob) + +def from_obj(OB_PTR_TYPE, p): + "NOT_RPYTHON" + ob = _pypy2ob.get(p) + if ob is None: + return lltype.nullptr(OB_PTR_TYPE.TO) + assert lltype.typeOf(ob) == OB_PTR_TYPE + return ob + +def to_obj(Class, ob): + "NOT_RPYTHON" + link = ob.c_ob_pypy_link + if link == 0: + return None + p = _adr2pypy[link] + assert isinstance(p, Class) + return p + +def next_dead(OB_PTR_TYPE): + if len(_d_list) == 0: + return lltype.nullptr(OB_PTR_TYPE.TO) + ob = _d_list.pop() + assert lltype.typeOf(ob) == OB_PTR_TYPE + return ob + +def _collect(track_allocation=True): + """NOT_RPYTHON: for tests only. Emulates a GC collection. + Will invoke dealloc_trigger_callback() once if there are objects + whose _Py_Dealloc() should be called. + """ + def detach(ob, wr_list): + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY + assert ob.c_ob_pypy_link + p = _adr2pypy[ob.c_ob_pypy_link] + assert p is not None + _adr2pypy[ob.c_ob_pypy_link] = None + wr_list.append((ob, weakref.ref(p))) + return p + + global _p_list, _o_list + wr_p_list = [] + new_p_list = [] + for ob in reversed(_p_list): + if ob.c_ob_refcnt not in (REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_LIGHT): + new_p_list.append(ob) + else: + p = detach(ob, wr_p_list) + del _pypy2ob[p] + del p + ob = None + _p_list = Ellipsis + + wr_o_list = [] + for ob in reversed(_o_list): + detach(ob, wr_o_list) + _o_list = Ellipsis + + rgc.collect() # forces the cycles to be resolved and the weakrefs to die + rgc.collect() + rgc.collect() + + def attach(ob, wr, final_list): + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY + p = wr() + if p is not None: + assert ob.c_ob_pypy_link + _adr2pypy[ob.c_ob_pypy_link] = p + final_list.append(ob) + return p + else: + ob.c_ob_pypy_link = 0 + if ob.c_ob_refcnt >= REFCNT_FROM_PYPY_LIGHT: + ob.c_ob_refcnt -= REFCNT_FROM_PYPY_LIGHT + ob.c_ob_pypy_link = 0 + if ob.c_ob_refcnt == 0: + lltype.free(ob, flavor='raw', + track_allocation=track_allocation) + else: + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY + assert ob.c_ob_refcnt < int(REFCNT_FROM_PYPY_LIGHT * 0.99) + ob.c_ob_refcnt -= REFCNT_FROM_PYPY + ob.c_ob_pypy_link = 0 + if ob.c_ob_refcnt == 0: + _d_list.append(ob) + return None + + _p_list = new_p_list + for ob, wr in wr_p_list: + p = attach(ob, wr, _p_list) + if p is not None: + _pypy2ob[p] = ob + _o_list = [] + for ob, wr in wr_o_list: + attach(ob, wr, _o_list) + + if _d_list: + res = _dealloc_trigger_callback() + if res == "RETRY": + _collect(track_allocation=track_allocation) + +_keepalive_forever = set() +def _dont_free_any_more(): + "Make sure that any object still referenced won't be freed any more." + for ob in _p_list + _o_list: + _keepalive_forever.add(to_obj(object, ob)) + del _d_list[:] + +# ____________________________________________________________ + + +def _unspec_p(hop, v_p): + assert isinstance(v_p.concretetype, lltype.Ptr) + assert v_p.concretetype.TO._gckind == 'gc' + return hop.genop('cast_opaque_ptr', [v_p], resulttype=llmemory.GCREF) + +def _unspec_ob(hop, v_ob): + assert isinstance(v_ob.concretetype, lltype.Ptr) + assert v_ob.concretetype.TO._gckind == 'raw' + return hop.genop('cast_ptr_to_adr', [v_ob], resulttype=llmemory.Address) + +def _spec_p(hop, v_p): + assert v_p.concretetype == llmemory.GCREF + return hop.genop('cast_opaque_ptr', [v_p], + resulttype=hop.r_result.lowleveltype) + +def _spec_ob(hop, v_ob): + assert v_ob.concretetype == llmemory.Address + return hop.genop('cast_adr_to_ptr', [v_ob], + resulttype=hop.r_result.lowleveltype) + + +class Entry(ExtRegistryEntry): + _about_ = init + + def compute_result_annotation(self, s_dealloc_callback): + from rpython.rtyper.llannotation import SomePtr + assert isinstance(s_dealloc_callback, SomePtr) # ll-ptr-to-function + + def specialize_call(self, hop): + hop.exception_cannot_occur() + [v_dealloc_callback] = hop.inputargs(hop.args_r[0]) + hop.genop('gc_rawrefcount_init', [v_dealloc_callback]) + + +class Entry(ExtRegistryEntry): + _about_ = (create_link_pypy, create_link_pyobj) + + def compute_result_annotation(self, s_p, s_ob): + pass + + def specialize_call(self, hop): + if self.instance is create_link_pypy: + name = 'gc_rawrefcount_create_link_pypy' + elif self.instance is create_link_pyobj: + name = 'gc_rawrefcount_create_link_pyobj' + v_p, v_ob = hop.inputargs(*hop.args_r) + hop.exception_cannot_occur() + hop.genop(name, [_unspec_p(hop, v_p), _unspec_ob(hop, v_ob)]) + + +class Entry(ExtRegistryEntry): + _about_ = from_obj + + def compute_result_annotation(self, s_OB_PTR_TYPE, s_p): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import lltype_to_annotation + assert (isinstance(s_p, annmodel.SomeInstance) or + annmodel.s_None.contains(s_p)) + assert s_OB_PTR_TYPE.is_constant() + return lltype_to_annotation(s_OB_PTR_TYPE.const) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_p = hop.inputarg(hop.args_r[1], arg=1) + v_ob = hop.genop('gc_rawrefcount_from_obj', [_unspec_p(hop, v_p)], + resulttype = llmemory.Address) + return _spec_ob(hop, v_ob) + +class Entry(ExtRegistryEntry): + _about_ = to_obj + + def compute_result_annotation(self, s_Class, s_ob): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr + assert isinstance(s_ob, SomePtr) + assert s_Class.is_constant() + classdef = self.bookkeeper.getuniqueclassdef(s_Class.const) + return annmodel.SomeInstance(classdef, can_be_None=True) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_ob = hop.inputarg(hop.args_r[1], arg=1) + v_p = hop.genop('gc_rawrefcount_to_obj', [_unspec_ob(hop, v_ob)], + resulttype = llmemory.GCREF) + return _spec_p(hop, v_p) + +class Entry(ExtRegistryEntry): + _about_ = next_dead + + def compute_result_annotation(self, s_OB_PTR_TYPE): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import lltype_to_annotation + assert s_OB_PTR_TYPE.is_constant() + return lltype_to_annotation(s_OB_PTR_TYPE.const) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_ob = hop.genop('gc_rawrefcount_next_dead', [], + resulttype = llmemory.Address) + return _spec_ob(hop, v_ob) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -487,6 +487,7 @@ class _GcRef(object): # implementation-specific: there should not be any after translation __slots__ = ['_x', '_handle'] + _TYPE = llmemory.GCREF def __init__(self, x): self._x = x def __hash__(self): diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rawrefcount.py @@ -0,0 +1,268 @@ +import weakref +from rpython.rlib import rawrefcount, objectmodel, rgc +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_LIGHT +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.annlowlevel import llhelper +from rpython.translator.c.test.test_standalone import StandaloneTests +from rpython.config.translationoption import get_combined_translation_config + + +class W_Root(object): + def __init__(self, intval=0): + self.intval = intval + def __nonzero__(self): + raise Exception("you cannot do that, you must use space.is_true()") + +PyObjectS = lltype.Struct('PyObjectS', + ('c_ob_refcnt', lltype.Signed), + ('c_ob_pypy_link', lltype.Signed)) +PyObject = lltype.Ptr(PyObjectS) + + +class TestRawRefCount: + + def setup_method(self, meth): + rawrefcount.init() + + def test_create_link_pypy(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_pypy(p, ob) + assert ob.c_ob_refcnt == 0 + ob.c_ob_refcnt += REFCNT_FROM_PYPY_LIGHT + assert rawrefcount.from_obj(PyObject, p) == ob + assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') + + def test_create_link_pyobj(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_pyobj(p, ob) + assert ob.c_ob_refcnt == 0 + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') + + def test_collect_p_dies(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_LIGHT + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + rawrefcount._collect() + assert rawrefcount._p_list == [] + assert wr_ob() is None + assert wr_p() is None + + def test_collect_p_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_LIGHT + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.c_ob_refcnt += 1 # <= + del ob, p + rawrefcount._collect() + ob = wr_ob() + p = wr_p() + assert ob is not None and p is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert rawrefcount.from_obj(PyObject, p) == ob + lltype.free(ob, flavor='raw') + + def test_collect_p_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_LIGHT + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + del ob # p remains + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert rawrefcount.from_obj(PyObject, p) == ob + lltype.free(ob, flavor='raw') + + def test_collect_o_dies(self): + trigger = []; rawrefcount.init(lambda: trigger.append(1)) + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert trigger == [1] + assert rawrefcount.next_dead(PyObject) == ob + assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS) + assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS) + assert rawrefcount._o_list == [] + assert wr_p() is None + assert ob.c_ob_refcnt == 0 + assert ob.c_ob_pypy_link == 0 + lltype.free(ob, flavor='raw') + + def test_collect_o_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + p.pyobj = ob + rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.c_ob_refcnt += 1 # <= + del p + rawrefcount._collect() + p = wr_p() + assert p is None # was unlinked + assert ob.c_ob_refcnt == 1 # != REFCNT_FROM_PYPY_OBJECT + 1 + assert rawrefcount._o_list == [] + assert rawrefcount.to_obj(W_Root, ob) == None + lltype.free(ob, flavor='raw') + + def test_collect_o_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + p.pyobj = ob + rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + del ob # p remains + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert rawrefcount._o_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert p.pyobj == ob + lltype.free(ob, flavor='raw') + + def test_collect_s_dies(self): + trigger = []; rawrefcount.init(lambda: trigger.append(1)) + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert trigger == [1] + assert rawrefcount._d_list == [ob] + assert rawrefcount._p_list == [] + assert wr_p() is None + assert ob.c_ob_refcnt == 0 + assert ob.c_ob_pypy_link == 0 + lltype.free(ob, flavor='raw') + + def test_collect_s_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + p.pyobj = ob + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.c_ob_refcnt += 1 # <= + del ob, p + rawrefcount._collect() + ob = wr_ob() + p = wr_p() + assert ob is not None and p is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') + + def test_collect_s_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + p.pyobj = ob + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + del ob # p remains + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') + + +class TestTranslated(StandaloneTests): + + def test_full_translation(self): + class State: + pass + state = State() + state.seen = [] + def dealloc_trigger(): + state.seen.append(1) + + def make_p(): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount.from_obj(PyObject, p) == ob + assert rawrefcount.to_obj(W_Root, ob) == p + return ob, p + + FTYPE = rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER + + def entry_point(argv): + ll_dealloc_trigger_callback = llhelper(FTYPE, dealloc_trigger) + rawrefcount.init(ll_dealloc_trigger_callback) + ob, p = make_p() + if state.seen != []: + print "OB COLLECTED REALLY TOO SOON" + return 1 + rgc.collect() + if state.seen != []: + print "OB COLLECTED TOO SOON" + return 1 + objectmodel.keepalive_until_here(p) + p = None + rgc.collect() + if state.seen != [1]: + print "OB NOT COLLECTED" + return 1 + if rawrefcount.next_dead(PyObject) != ob: + print "NEXT_DEAD != OB" + return 1 + if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS): + print "NEXT_DEAD second time != NULL" + return 1 + print "OK!" + lltype.free(ob, flavor='raw') + return 0 + + self.config = get_combined_translation_config(translating=True) + self.config.translation.gc = "incminimark" + t, cbuilder = self.compile(entry_point) + data = cbuilder.cmdexec('hi there') + assert data.startswith('OK!\n') diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -515,8 +515,10 @@ struct_use_ctypes_storage(struct_container, struct_storage) struct_container._setparentstructure(container, field_name) elif isinstance(FIELDTYPE, lltype.Array): - assert FIELDTYPE._hints.get('nolength', False) == False - arraycontainer = _array_of_known_length(FIELDTYPE) + if FIELDTYPE._hints.get('nolength', False): + arraycontainer = _array_of_unknown_length(FIELDTYPE) + else: + arraycontainer = _array_of_known_length(FIELDTYPE) arraycontainer._storage = ctypes.pointer( getattr(ctypes_storage.contents, field_name)) arraycontainer._setparentstructure(container, field_name) @@ -567,6 +569,7 @@ raise Exception("internal ll2ctypes error - " "double conversion from lltype to ctypes?") # XXX don't store here immortal structures + print "LL2CTYPES:", addr ALLOCATED[addr] = self def _addressof_storage(self): @@ -579,6 +582,7 @@ self._check() # no double-frees # allow the ctypes object to go away now addr = ctypes.cast(self._storage, ctypes.c_void_p).value + print "LL2C FREE:", addr try: del ALLOCATED[addr] except KeyError: @@ -613,11 +617,14 @@ return object.__hash__(self) def __repr__(self): + if '__str__' in self._TYPE._adtmeths: + r = self._TYPE._adtmeths['__str__'](self) + else: + r = 'C object %s' % (self._TYPE,) if self._storage is None: - return '' % (self._TYPE,) + return '' % (r,) else: - return '' % (self._TYPE, - fixid(self._addressof_storage())) + return '<%s at 0x%x>' % (r, fixid(self._addressof_storage())) def __str__(self): return repr(self) @@ -942,7 +949,8 @@ REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) - container = lltype._struct(T.TO, carray.length) + length = getattr(carray, 'length', 9999) # XXX + container = lltype._struct(T.TO, length) else: # special treatment of 'OBJECT' subclasses if get_rtyper() and lltype._castdepth(REAL_TYPE, OBJECT) >= 0: diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -503,6 +503,12 @@ 'gc_gcflag_extra' : LLOp(), 'gc_add_memory_pressure': LLOp(), + 'gc_rawrefcount_init': LLOp(), + 'gc_rawrefcount_create_link_pypy': LLOp(), + 'gc_rawrefcount_create_link_pyobj': LLOp(), + 'gc_rawrefcount_from_obj': LLOp(sideeffects=False), + 'gc_rawrefcount_to_obj': LLOp(sideeffects=False), + # ------- JIT & GC interaction, only for some GCs ---------- 'gc_adr_of_nursery_free' : LLOp(), diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -631,7 +631,8 @@ def CExternVariable(TYPE, name, eci, _CConstantClass=CConstant, sandboxsafe=False, _nowrapper=False, - c_type=None, getter_only=False): + c_type=None, getter_only=False, + declare_as_extern=(sys.platform != 'win32')): """Return a pair of functions - a getter and a setter - to access the given global C variable. """ @@ -661,7 +662,7 @@ c_setter = "void %(setter_name)s (%(c_type)s v) { %(name)s = v; }" % locals() lines = ["#include <%s>" % i for i in eci.includes] - if sys.platform != 'win32': + if declare_as_extern: lines.append('extern %s %s;' % (c_type, name)) lines.append(c_getter) if not getter_only: @@ -790,6 +791,12 @@ return length str2chararray._annenforceargs_ = [strtype, None, int] + # s[start:start+length] -> already-existing char[], + # all characters including zeros + def str2rawmem(s, array, start, length): + ll_s = llstrtype(s) + copy_string_to_raw(ll_s, array, start, length) + # char* -> str # doesn't free char* def charp2str(cp): @@ -940,19 +947,19 @@ return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, - charp2strn, charpsize2str, str2chararray, + charp2strn, charpsize2str, str2chararray, str2rawmem, ) (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, - charp2strn, charpsize2str, str2chararray, + charp2strn, charpsize2str, str2chararray, str2rawmem, ) = make_string_mappings(str) (unicode2wcharp, free_wcharp, wcharp2unicode, get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer, alloc_unicodebuffer, unicode_from_buffer, keep_unicodebuffer_alive_until_here, - wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, + wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, unicode2rawmem, ) = make_string_mappings(unicode) # char** diff --git a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -263,10 +263,11 @@ """An entry in a CConfig class that stands for an externally defined structure. """ - def __init__(self, name, interesting_fields, ifdef=None): + def __init__(self, name, interesting_fields, ifdef=None, adtmeths={}): self.name = name self.interesting_fields = interesting_fields self.ifdef = ifdef + self.adtmeths = adtmeths def prepare_code(self): if self.ifdef is not None: @@ -313,7 +314,9 @@ offset = info['fldofs ' + fieldname] size = info['fldsize ' + fieldname] sign = info.get('fldunsigned ' + fieldname, False) - if (size, sign) != rffi.size_and_sign(fieldtype): + if is_array_nolength(fieldtype): + pass # ignore size and sign + elif (size, sign) != rffi.size_and_sign(fieldtype): fieldtype = fixup_ctype(fieldtype, fieldname, (size, sign)) layout_addfield(layout, offset, fieldtype, fieldname) @@ -353,7 +356,7 @@ name = name[7:] else: hints['typedef'] = True - kwds = {'hints': hints} + kwds = {'hints': hints, 'adtmeths': self.adtmeths} return rffi.CStruct(name, *fields, **kwds) class SimpleType(CConfigEntry): @@ -682,8 +685,14 @@ def __repr__(self): return '' % (self.name, self.ctype) +def is_array_nolength(TYPE): + return isinstance(TYPE, lltype.Array) and TYPE._hints.get('nolength', False) + def layout_addfield(layout, offset, ctype, prefix): - size = _sizeof(ctype) + if is_array_nolength(ctype): + size = len(layout) - offset # all the rest of the struct + else: + size = _sizeof(ctype) name = prefix i = 0 while name in layout: diff --git a/rpython/rtyper/tool/test/test_rffi_platform.py b/rpython/rtyper/tool/test/test_rffi_platform.py --- a/rpython/rtyper/tool/test/test_rffi_platform.py +++ b/rpython/rtyper/tool/test/test_rffi_platform.py @@ -270,6 +270,19 @@ [("d_name", lltype.FixedSizeArray(rffi.CHAR, 1))]) assert dirent.c_d_name.length == 32 +def test_array_varsized_struct(): + dirent = rffi_platform.getstruct("struct dirent", + """ + struct dirent /* for this example only, not the exact dirent */ + { + int d_off; + char d_name[1]; + }; + """, + [("d_name", rffi.CArray(rffi.CHAR))]) + assert rffi.offsetof(dirent, 'c_d_name') == 4 + assert dirent.c_d_name == rffi.CArray(rffi.CHAR) + def test_has_0001(): assert rffi_platform.has("x", "int x = 3;") assert not rffi_platform.has("x", "") From pypy.commits at gmail.com Tue Jan 19 09:30:36 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 06:30:36 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: copy some code, rekill parts that I definitely want killed Message-ID: <569e488c.17941c0a.e7f46.25e1@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81855:e02a927b7f7a Date: 2016-01-19 15:29 +0100 http://bitbucket.org/pypy/pypy/changeset/e02a927b7f7a/ Log: copy some code, rekill parts that I definitely want killed diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -509,14 +509,16 @@ return {"PyObject*": PyObject, "PyTypeObject*": PyTypeObjectPtr, "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype] +# Note: as a special case, "PyObject" is the pointer type in RPython, +# corresponding to "PyObject *" in C. We do that only for PyObject. +# For example, "PyTypeObject" is the struct type even in RPython. PyTypeObject = lltype.ForwardReference() PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -# It is important that these PyObjects are allocated in a raw fashion -# Thus we cannot save a forward pointer to the wrapped object -# So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) +PyObjectFields = (("ob_refcnt", lltype.Signed), + ("ob_pypy_link", lltype.Signed), + ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) @@ -827,6 +829,18 @@ space.fromcache(State).install_dll(eci) + def dealloc_trigger(): + print 'dealloc_trigger...' + while True: + ob = rawrefcount.next_dead(PyObject) + if not ob: + break + print ob + _Py_Dealloc(space, ob) + print 'dealloc_trigger DONE' + return "RETRY" + rawrefcount.init(dealloc_trigger) + # populate static data for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -134,107 +134,6 @@ #________________________________________________________ # refcounted object support -class RefcountState: - def __init__(self, space): - self.space = space - self.py_objects_w2r = {} # { w_obj -> raw PyObject } - self.py_objects_r2w = {} # { addr of raw PyObject -> w_obj } - - self.lifeline_dict = RWeakKeyDictionary(W_Root, PyOLifeline) - - self.borrow_mapping = {None: {}} - # { w_container -> { w_containee -> None } } - # the None entry manages references borrowed during a call to - # generic_cpy_call() - - # For tests - self.non_heaptypes_w = [] - - def _cleanup_(self): - assert self.borrow_mapping == {None: {}} - self.py_objects_r2w.clear() # is not valid anymore after translation - - def init_r2w_from_w2r(self): - """Rebuilds the dict py_objects_r2w on startup""" - for w_obj, obj in self.py_objects_w2r.items(): - ptr = rffi.cast(ADDR, obj) - self.py_objects_r2w[ptr] = w_obj - - def print_refcounts(self): - print "REFCOUNTS" - for w_obj, obj in self.py_objects_w2r.items(): - print "%r: %i" % (w_obj, obj.c_ob_refcnt) - - def get_from_lifeline(self, w_obj): - lifeline = self.lifeline_dict.get(w_obj) - if lifeline is not None: # make old PyObject ready for use in C code - py_obj = lifeline.pyo - assert py_obj.c_ob_refcnt == 0 - return py_obj - else: - return lltype.nullptr(PyObject.TO) - - def set_lifeline(self, w_obj, py_obj): - self.lifeline_dict.set(w_obj, - PyOLifeline(self.space, py_obj)) - - def make_borrowed(self, w_container, w_borrowed): - """ - Create a borrowed reference, which will live as long as the container - has a living reference (as a PyObject!) - """ - ref = make_ref(self.space, w_borrowed) - obj_ptr = rffi.cast(ADDR, ref) - - borrowees = self.borrow_mapping.setdefault(w_container, {}) - if w_borrowed in borrowees: - Py_DecRef(self.space, w_borrowed) # cancel incref from make_ref() - else: - borrowees[w_borrowed] = None - - return ref - - def reset_borrowed_references(self): - "Used in tests" - for w_container, w_borrowed in self.borrow_mapping.items(): - Py_DecRef(self.space, w_borrowed) - self.borrow_mapping = {None: {}} - - def delete_borrower(self, w_obj): - """ - Called when a potential container for borrowed references has lost its - last reference. Removes the borrowed references it contains. - """ - if w_obj in self.borrow_mapping: # move to lifeline __del__ - for w_containee in self.borrow_mapping[w_obj]: - self.forget_borrowee(w_containee) - del self.borrow_mapping[w_obj] - - def swap_borrow_container(self, container): - """switch the current default contained with the given one.""" - if container is None: - old_container = self.borrow_mapping[None] - self.borrow_mapping[None] = {} - return old_container - else: - old_container = self.borrow_mapping[None] - self.borrow_mapping[None] = container - for w_containee in old_container: - self.forget_borrowee(w_containee) - - def forget_borrowee(self, w_obj): - "De-register an object from the list of borrowed references" - ref = self.py_objects_w2r.get(w_obj, lltype.nullptr(PyObject.TO)) - if not ref: - if DEBUG_REFCOUNT: - print >>sys.stderr, "Borrowed object is already gone!" - return - - Py_DecRef(self.space, ref) - -class InvalidPointerException(Exception): - pass - DEBUG_REFCOUNT = False def debug_refcount(*args, **kwargs): @@ -382,68 +281,6 @@ # "'s type which is", rffi.charp2str(pto.c_tp_name) generic_cpy_call_dont_decref(space, pto.c_tp_dealloc, obj) -#___________________________________________________________ -# Support for "lifelines" -# -# Object structure must stay alive even when not referenced -# by any C code. - -class PyOLifeline(object): - def __init__(self, space, pyo): - self.pyo = pyo - self.space = space - - def __del__(self): - if self.pyo: - assert self.pyo.c_ob_refcnt == 0 - _Py_Dealloc(self.space, self.pyo) - self.pyo = lltype.nullptr(PyObject.TO) - # XXX handle borrowed objects here - -#___________________________________________________________ -# Support for borrowed references - -def make_borrowed_ref(space, w_container, w_borrowed): - """ - Create a borrowed reference, which will live as long as the container - has a living reference (as a PyObject!) - """ - if w_borrowed is None: - return lltype.nullptr(PyObject.TO) - - state = space.fromcache(RefcountState) - return state.make_borrowed(w_container, w_borrowed) - -class Reference: - def __init__(self, pyobj): - assert not isinstance(pyobj, W_Root) - self.pyobj = pyobj - - def get_ref(self, space): - return self.pyobj - - def get_wrapped(self, space): - return from_ref(space, self.pyobj) - -class BorrowPair(Reference): - """ - Delays the creation of a borrowed reference. - """ - def __init__(self, w_container, w_borrowed): - self.w_container = w_container - self.w_borrowed = w_borrowed - - def get_ref(self, space): - return make_borrowed_ref(space, self.w_container, self.w_borrowed) - - def get_wrapped(self, space): - return self.w_borrowed - -def borrow_from(container, borrowed): - return BorrowPair(container, borrowed) - -#___________________________________________________________ - @cpython_api([rffi.VOIDP], lltype.Signed, error=CANNOT_FAIL) def _Py_HashPointer(space, ptr): return rffi.cast(lltype.Signed, ptr) diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -1,8 +1,11 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError +from pypy.interpreter.executioncontext import AsyncAction from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.rdynload import DLLHANDLE +from rpython.rlib import rawrefcount import sys class State: @@ -11,6 +14,8 @@ self.reset() self.programname = lltype.nullptr(rffi.CCHARP.TO) self.version = lltype.nullptr(rffi.CCHARP.TO) + pyobj_dealloc_action = PyObjDeallocAction(space) + self.dealloc_trigger = lambda: pyobj_dealloc_action.fire() def reset(self): from pypy.module.cpyext.modsupport import PyMethodDef @@ -74,13 +79,15 @@ "This function is called when the program really starts" from pypy.module.cpyext.typeobject import setup_new_method_def - from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.api import INIT_FUNCTIONS + from pypy.module.cpyext.api import init_static_data_translated + + if we_are_translated(): + rawrefcount.init(llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER, + self.dealloc_trigger)) + init_static_data_translated(space) setup_new_method_def(space) - if we_are_translated(): - refcountstate = space.fromcache(RefcountState) - refcountstate.init_r2w_from_w2r() for func in INIT_FUNCTIONS: func(space) @@ -133,3 +140,17 @@ w_dict = w_mod.getdict(space) w_copy = space.call_method(w_dict, 'copy') self.extensions[path] = w_copy + + +class PyObjDeallocAction(AsyncAction): + """An action that invokes _Py_Dealloc() on the dying PyObjects. + """ + + def perform(self, executioncontext, frame): + from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc + + while True: + py_obj = rawrefcount.next_dead(PyObject) + if not py_obj: + break + _Py_Dealloc(self.space, py_obj) From pypy.commits at gmail.com Tue Jan 19 09:57:06 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 06:57:06 -0800 (PST) Subject: [pypy-commit] cffi default: issue #243: explicitly complain if ffi.def_extern() is called on the Message-ID: <569e4ec2.2457c20a.fab7.38c7@mx.google.com> Author: Armin Rigo Branch: Changeset: r2613:8a31eab6e71f Date: 2016-01-19 15:56 +0100 http://bitbucket.org/cffi/cffi/changeset/8a31eab6e71f/ Log: issue #243: explicitly complain if ffi.def_extern() is called on the Python-side FFI object diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -699,6 +699,10 @@ # self._embedding = pysource + def def_extern(self, *args, **kwds): + raise ValueError("ffi.def_extern() is only available on API-mode FFI " + "objects") + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -419,3 +419,7 @@ ]: x = ffi.sizeof(name) assert 1 <= x <= 16 + + def test_ffi_def_extern(self): + ffi = FFI() + py.test.raises(ValueError, ffi.def_extern) From pypy.commits at gmail.com Tue Jan 19 10:05:45 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 07:05:45 -0800 (PST) Subject: [pypy-commit] pypy default: move file Message-ID: <569e50c9.cb571c0a.a54d.36d0@mx.google.com> Author: Armin Rigo Branch: Changeset: r81856:b24488e83eda Date: 2016-01-19 16:04 +0100 http://bitbucket.org/pypy/pypy/changeset/b24488e83eda/ Log: move file diff --git a/pypy/module/cpyext/Doc_stubgen_enable.patch b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch rename from pypy/module/cpyext/Doc_stubgen_enable.patch rename to pypy/module/cpyext/patches/Doc_stubgen_enable.patch From pypy.commits at gmail.com Tue Jan 19 10:22:45 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 19 Jan 2016 07:22:45 -0800 (PST) Subject: [pypy-commit] =?utf-8?q?cffi_default=3A_clarify_=28thanks_R=C3=A9?= =?utf-8?b?bWkp?= Message-ID: <569e54c5.c8ac1c0a.3d91d.36c2@mx.google.com> Author: Armin Rigo Branch: Changeset: r2614:fca64086ec22 Date: 2016-01-19 16:22 +0100 http://bitbucket.org/cffi/cffi/changeset/fca64086ec22/ Log: clarify (thanks Rémi) diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -476,16 +476,20 @@ Python function object that is, at runtime, attached with ``@ffi.def_extern()``. -The ``@ffi.def_extern()`` decorator should be applied to a global -function, but *only once.* This is because each function from the cdef with -``extern "Python"`` turns into only one C function. To support some -corner cases, it is possible to redefine the attached Python function -by calling ``@ffi.def_extern()`` again---but this is not recommended! -Better write the single global Python function more flexibly in the -first place. Calling ``@ffi.def_extern()`` again changes the C logic -to call the new Python function; the old Python function is not -callable any more and the C function pointer you get from -``lib.my_function`` is always the same. +The ``@ffi.def_extern()`` decorator should be applied to **global +functions,** one for each ``extern "Python"`` function of the same +name. + +To support some corner cases, it is possible to redefine the attached +Python function by calling ``@ffi.def_extern()`` again for the same +name---but this is not recommended! Better attach a single global +Python function for this name, and write it more flexibly in the first +place. This is because each ``extern "Python"`` function turns into +only one C function. Calling ``@ffi.def_extern()`` again changes this +function's C logic to call the new Python function; the old Python +function is not callable any more. The C function pointer you get +from ``lib.my_function`` is always this C function's address, i.e. it +remains the same. Extern "Python" and ``void *`` arguments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From pypy.commits at gmail.com Tue Jan 19 13:25:59 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Jan 2016 10:25:59 -0800 (PST) Subject: [pypy-commit] pypy default: don't create 2-tuples all the time in mapdict. pass the content along instead Message-ID: <569e7fb7.2aacc20a.cdeb6.ffff8f5b@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81857:170318ed8b6b Date: 2016-01-19 14:56 +0100 http://bitbucket.org/pypy/pypy/changeset/170318ed8b6b/ Log: don't create 2-tuples all the time in mapdict. pass the content along instead this changes nothing in the warmed up case, but it means the JIT doesn't have to virtualize tuples all the time just because. also, the interpreter is a bit faster. diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -30,10 +30,10 @@ assert isinstance(terminator, Terminator) self.terminator = terminator - def read(self, obj, selector): - attr = self.find_map_attr(selector) + def read(self, obj, name, index): + attr = self.find_map_attr(name, index) if attr is None: - return self.terminator._read_terminator(obj, selector) + return self.terminator._read_terminator(obj, name, index) if ( jit.isconstant(attr.storageindex) and jit.isconstant(obj) and @@ -47,39 +47,39 @@ def _pure_mapdict_read_storage(self, obj, storageindex): return obj._mapdict_read_storage(storageindex) - def write(self, obj, selector, w_value): - attr = self.find_map_attr(selector) + def write(self, obj, name, index, w_value): + attr = self.find_map_attr(name, index) if attr is None: - return self.terminator._write_terminator(obj, selector, w_value) + return self.terminator._write_terminator(obj, name, index, w_value) if not attr.ever_mutated: attr.ever_mutated = True obj._mapdict_write_storage(attr.storageindex, w_value) return True - def delete(self, obj, selector): + def delete(self, obj, name, index): pass - def find_map_attr(self, selector): + def find_map_attr(self, name, index): if jit.we_are_jitted(): # hack for the jit: # the _find_map_attr method is pure too, but its argument is never # constant, because it is always a new tuple - return self._find_map_attr_jit_pure(selector[0], selector[1]) + return self._find_map_attr_jit_pure(name, index) else: - return self._find_map_attr_indirection(selector) + return self._find_map_attr_indirection(name, index) @jit.elidable def _find_map_attr_jit_pure(self, name, index): - return self._find_map_attr_indirection((name, index)) + return self._find_map_attr_indirection(name, index) @jit.dont_look_inside - def _find_map_attr_indirection(self, selector): + def _find_map_attr_indirection(self, name, index): if (self.space.config.objspace.std.withmethodcache): - return self._find_map_attr_cache(selector) - return self._find_map_attr(selector) + return self._find_map_attr_cache(name, index) + return self._find_map_attr(name, index) @jit.dont_look_inside - def _find_map_attr_cache(self, selector): + def _find_map_attr_cache(self, name, index): space = self.space cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp @@ -87,31 +87,36 @@ attrs_as_int = objectmodel.current_object_addr_as_int(self) # ^^^Note: see comment in typeobject.py for # _pure_lookup_where_with_method_cache() - hash_selector = objectmodel.compute_hash(selector) + + # unrolled hash computation for 2-tuple + c1 = 0x345678 + c2 = 1000003 + hash_name = objectmodel.compute_hash(name) + hash_selector = intmask((c2 * ((c2 * c1) ^ hash_name)) ^ index) product = intmask(attrs_as_int * hash_selector) attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too cached_attr = cache.attrs[attr_hash] if cached_attr is self: - cached_selector = cache.selectors[attr_hash] - if cached_selector == selector: + cached_name = cache.names[attr_hash] + cached_index = cache.indexes[attr_hash] + if cached_name == name and cached_index == index: attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: - name = selector[0] cache.hits[name] = cache.hits.get(name, 0) + 1 return attr - attr = self._find_map_attr(selector) + attr = self._find_map_attr(name, index) cache.attrs[attr_hash] = self - cache.selectors[attr_hash] = selector + cache.names[attr_hash] = name + cache.indexes[attr_hash] = index cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: - name = selector[0] cache.misses[name] = cache.misses.get(name, 0) + 1 return attr - def _find_map_attr(self, selector): + def _find_map_attr(self, name, index): while isinstance(self, PlainAttribute): - if selector == self.selector: + if name == self.name and index == self.index: return self self = self.back return None @@ -137,23 +142,22 @@ @jit.elidable def _get_new_attr(self, name, index): - selector = name, index cache = self.cache_attrs if cache is None: cache = self.cache_attrs = {} - attr = cache.get(selector, None) + attr = cache.get((name, index), None) if attr is None: - attr = PlainAttribute(selector, self) - cache[selector] = attr + attr = PlainAttribute(name, index, self) + cache[name, index] = attr return attr - @jit.look_inside_iff(lambda self, obj, selector, w_value: + @jit.look_inside_iff(lambda self, obj, name, index, w_value: jit.isconstant(self) and - jit.isconstant(selector[0]) and - jit.isconstant(selector[1])) - def add_attr(self, obj, selector, w_value): + jit.isconstant(name) and + jit.isconstant(index)) + def add_attr(self, obj, name, index, w_value): # grumble, jit needs this - attr = self._get_new_attr(selector[0], selector[1]) + attr = self._get_new_attr(name, index) oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): size_est = (oldattr._size_estimate + attr.size_estimate() @@ -189,11 +193,11 @@ AbstractAttribute.__init__(self, space, self) self.w_cls = w_cls - def _read_terminator(self, obj, selector): + def _read_terminator(self, obj, name, index): return None - def _write_terminator(self, obj, selector, w_value): - obj._get_mapdict_map().add_attr(obj, selector, w_value) + def _write_terminator(self, obj, name, index, w_value): + obj._get_mapdict_map().add_attr(obj, name, index, w_value) return True def copy(self, obj): @@ -231,40 +235,40 @@ class NoDictTerminator(Terminator): - def _write_terminator(self, obj, selector, w_value): - if selector[1] == DICT: + def _write_terminator(self, obj, name, index, w_value): + if index == DICT: return False - return Terminator._write_terminator(self, obj, selector, w_value) + return Terminator._write_terminator(self, obj, name, index, w_value) class DevolvedDictTerminator(Terminator): - def _read_terminator(self, obj, selector): - if selector[1] == DICT: + def _read_terminator(self, obj, name, index): + if index == DICT: space = self.space w_dict = obj.getdict(space) - return space.finditem_str(w_dict, selector[0]) - return Terminator._read_terminator(self, obj, selector) + return space.finditem_str(w_dict, name) + return Terminator._read_terminator(self, obj, name, index) - def _write_terminator(self, obj, selector, w_value): - if selector[1] == DICT: + def _write_terminator(self, obj, name, index, w_value): + if index == DICT: space = self.space w_dict = obj.getdict(space) - space.setitem_str(w_dict, selector[0], w_value) + space.setitem_str(w_dict, name, w_value) return True - return Terminator._write_terminator(self, obj, selector, w_value) + return Terminator._write_terminator(self, obj, name, index, w_value) - def delete(self, obj, selector): + def delete(self, obj, name, index): from pypy.interpreter.error import OperationError - if selector[1] == DICT: + if index == DICT: space = self.space w_dict = obj.getdict(space) try: - space.delitem(w_dict, space.wrap(selector[0])) + space.delitem(w_dict, space.wrap(name)) except OperationError, ex: if not ex.match(space, space.w_KeyError): raise return Terminator.copy(self, obj) - return Terminator.delete(self, obj, selector) + return Terminator.delete(self, obj, name, index) def remove_dict_entries(self, obj): assert 0, "should be unreachable" @@ -276,27 +280,28 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'storageindex', 'back', 'ever_mutated?'] + _immutable_fields_ = ['name', 'index', 'storageindex', 'back', 'ever_mutated?'] - def __init__(self, selector, back): + def __init__(self, name, index, back): AbstractAttribute.__init__(self, back.space, back.terminator) - self.selector = selector + self.name = name + self.index = index self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False def _copy_attr(self, obj, new_obj): - w_value = self.read(obj, self.selector) - new_obj._get_mapdict_map().add_attr(new_obj, self.selector, w_value) + w_value = self.read(obj, self.name, self.index) + new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.index, w_value) - def delete(self, obj, selector): - if selector == self.selector: + def delete(self, obj, name, index): + if name == self.name and index == self.index: # ok, attribute is deleted if not self.ever_mutated: self.ever_mutated = True return self.back.copy(obj) - new_obj = self.back.delete(obj, selector) + new_obj = self.back.delete(obj, name, index) if new_obj is not None: self._copy_attr(obj, new_obj) return new_obj @@ -315,14 +320,14 @@ return new_obj def search(self, attrtype): - if self.selector[1] == attrtype: + if self.index == attrtype: return self return self.back.search(attrtype) def materialize_r_dict(self, space, obj, dict_w): new_obj = self.back.materialize_r_dict(space, obj, dict_w) - if self.selector[1] == DICT: - w_attr = space.wrap(self.selector[0]) + if self.index == DICT: + w_attr = space.wrap(self.name) dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) else: self._copy_attr(obj, new_obj) @@ -330,12 +335,12 @@ def remove_dict_entries(self, obj): new_obj = self.back.remove_dict_entries(obj) - if self.selector[1] != DICT: + if self.index != DICT: self._copy_attr(obj, new_obj) return new_obj def __repr__(self): - return "" % (self.selector, self.storageindex, self.back) + return "" % (self.name, self.index, self.storageindex, self.back) def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to @@ -347,8 +352,8 @@ assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE - self._empty_selector = (None, INVALID) - self.selectors = [self._empty_selector] * SIZE + self.names = [None] * SIZE + self.indexes = [INVALID] * SIZE self.cached_attrs = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} @@ -357,8 +362,9 @@ def clear(self): for i in range(len(self.attrs)): self.attrs[i] = None - for i in range(len(self.selectors)): - self.selectors[i] = self._empty_selector + for i in range(len(self.names)): + self.names[i] = None + self.indexes[i] = INVALID for i in range(len(self.cached_attrs)): self.cached_attrs[i] = None @@ -388,20 +394,20 @@ # objspace interface def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, (attrname, DICT)) + return self._get_mapdict_map().read(self, attrname, DICT) def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, (attrname, DICT), w_value) + return self._get_mapdict_map().write(self, attrname, DICT, w_value) def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, (attrname, DICT)) + new_obj = self._get_mapdict_map().delete(self, attrname, DICT) if new_obj is None: return False self._become(new_obj) return True def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, ("dict", SPECIAL)) + w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) if w_dict is not None: assert isinstance(w_dict, W_DictMultiObject) return w_dict @@ -409,7 +415,7 @@ strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) assert flag return w_dict @@ -425,7 +431,7 @@ # shell that continues to delegate to 'self'. if type(w_olddict.get_strategy()) is MapDictStrategy: w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) assert flag def getclass(self, space): @@ -443,16 +449,16 @@ self._init_empty(w_subtype.terminator) def getslotvalue(self, slotindex): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - return self._get_mapdict_map().read(self, key) + index = SLOTS_STARTING_FROM + slotindex + return self._get_mapdict_map().read(self, "slot", index) def setslotvalue(self, slotindex, w_value): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - self._get_mapdict_map().write(self, key, w_value) + index = SLOTS_STARTING_FROM + slotindex + self._get_mapdict_map().write(self, "slot", index, w_value) def delslotvalue(self, slotindex): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - new_obj = self._get_mapdict_map().delete(self, key) + index = SLOTS_STARTING_FROM + slotindex + new_obj = self._get_mapdict_map().delete(self, "slot", index) if new_obj is None: return False self._become(new_obj) @@ -462,7 +468,7 @@ def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline - lifeline = self._get_mapdict_map().read(self, ("weakref", SPECIAL)) + lifeline = self._get_mapdict_map().read(self, "weakref", SPECIAL) if lifeline is None: return None assert isinstance(lifeline, WeakrefLifeline) @@ -472,11 +478,11 @@ def setweakref(self, space, weakreflifeline): from pypy.module._weakref.interp__weakref import WeakrefLifeline assert isinstance(weakreflifeline, WeakrefLifeline) - self._get_mapdict_map().write(self, ("weakref", SPECIAL), weakreflifeline) + self._get_mapdict_map().write(self, "weakref", SPECIAL, weakreflifeline) setweakref._cannot_really_call_random_things_ = True def delweakref(self): - self._get_mapdict_map().write(self, ("weakref", SPECIAL), None) + self._get_mapdict_map().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True class ObjectMixin(object): @@ -721,7 +727,7 @@ curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT) if curr is None: raise KeyError - key = curr.selector[0] + key = curr.name w_value = self.getitem_str(w_dict, key) w_key = self.space.wrap(key) self.delitem(w_dict, w_key) @@ -758,7 +764,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name w_attr = self.space.wrap(attr) return w_attr return None @@ -780,7 +786,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name return self.w_obj.getdictvalue(self.space, attr) return None @@ -801,7 +807,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name w_attr = self.space.wrap(attr) return w_attr, self.w_obj.getdictvalue(self.space, attr) return None, None @@ -884,9 +890,9 @@ _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # - selector = ("", INVALID) + attrname, index = ("", INVALID) if w_descr is None: - selector = (name, DICT) # common case: no such attr in the class + attrname, index = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, MutableCell): pass # we have a MutableCell in the class: give up elif space.is_data_descr(w_descr): @@ -894,20 +900,21 @@ # (if any) has no relevance. from pypy.interpreter.typedef import Member if isinstance(w_descr, Member): # it is a slot -- easy case - selector = ("slot", SLOTS_STARTING_FROM + w_descr.index) + attrname, index = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. If there is # also a dict attribute, use the latter, caching its storageindex. # If not, we loose. We could do better in this case too, # but we don't care too much; the common case of a method # invocation is handled by LOOKUP_METHOD_xxx below. - selector = (name, DICT) + attrname = name + index = DICT # - if selector[1] != INVALID: - attr = map.find_map_attr(selector) + if index != INVALID: + attr = map.find_map_attr(attrname, index) if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator, - # map.find_map_attr will always return None if selector[1]==DICT. + # map.find_map_attr will always return None if index==DICT. _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) return w_obj._mapdict_read_storage(attr.storageindex) if space.config.objspace.std.withmethodcachecounter: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -34,8 +34,8 @@ def test_plain_attribute(): w_cls = "class" - aa = PlainAttribute(("b", DICT), - PlainAttribute(("a", DICT), + aa = PlainAttribute("b", DICT, + PlainAttribute("a", DICT, Terminator(space, w_cls))) assert aa.space is space assert aa.terminator.w_cls is w_cls @@ -63,16 +63,16 @@ def test_huge_chain(): current = Terminator(space, "cls") for i in range(20000): - current = PlainAttribute((str(i), DICT), current) - assert current.find_map_attr(("0", DICT)).storageindex == 0 + current = PlainAttribute(str(i), DICT, current) + assert current.find_map_attr("0", DICT).storageindex == 0 def test_search(): - aa = PlainAttribute(("b", DICT), PlainAttribute(("a", DICT), Terminator(None, None))) + aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(None, None))) assert aa.search(DICT) is aa assert aa.search(SLOTS_STARTING_FROM) is None assert aa.search(SPECIAL) is None - bb = PlainAttribute(("C", SPECIAL), PlainAttribute(("A", SLOTS_STARTING_FROM), aa)) + bb = PlainAttribute("C", SPECIAL, PlainAttribute("A", SLOTS_STARTING_FROM, aa)) assert bb.search(DICT) is aa assert bb.search(SLOTS_STARTING_FROM) is bb.back assert bb.search(SPECIAL) is bb @@ -320,7 +320,7 @@ d = {} w_d = FakeDict(d) - flag = obj.map.write(obj, ("dict", SPECIAL), w_d) + flag = obj.map.write(obj, "dict", SPECIAL, w_d) assert flag materialize_r_dict(space, obj, d) assert d == {"a": 5, "b": 6, "c": 7} From pypy.commits at gmail.com Tue Jan 19 13:26:01 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Jan 2016 10:26:01 -0800 (PST) Subject: [pypy-commit] pypy default: merge Message-ID: <569e7fb9.c4b61c0a.351b2.ffff8653@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r81858:0fe27c291b2e Date: 2016-01-19 19:25 +0100 http://bitbucket.org/pypy/pypy/changeset/0fe27c291b2e/ Log: merge diff --git a/pypy/module/cpyext/Doc_stubgen_enable.patch b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch rename from pypy/module/cpyext/Doc_stubgen_enable.patch rename to pypy/module/cpyext/patches/Doc_stubgen_enable.patch From pypy.commits at gmail.com Tue Jan 19 13:53:13 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Jan 2016 10:53:13 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Backed out changeset 2906290a8ebc Message-ID: <569e8619.03231c0a.b3089.ffff8411@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81859:7ec528e70b0f Date: 2016-01-19 18:52 +0000 http://bitbucket.org/pypy/pypy/changeset/7ec528e70b0f/ Log: Backed out changeset 2906290a8ebc The issue is that creating the external function stubs prevents the functions that need to be sandbox-transformed from being annotated correctly. diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -358,10 +358,6 @@ return self.descs[obj_key] except KeyError: if isinstance(pyobj, types.FunctionType): - if self.annotator.translator.config.translation.sandbox: - if hasattr(pyobj, '_ptr') and not getattr(pyobj._ptr._obj, '_safe_not_sandboxed', True): - from rpython.translator.sandbox.rsandbox import get_sandbox_stub - pyobj = get_sandbox_stub(pyobj._ptr._obj) result = description.FunctionDesc(self, pyobj) elif isinstance(pyobj, (type, types.ClassType)): if pyobj is object: diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -903,12 +903,26 @@ funcgen.implementation_end() def new_funcnode(db, T, obj, forcename=None): + sandbox = db.sandbox and need_sandboxing(obj) + if sandbox: + if getattr(obj, 'external', None) is not None: + obj.__dict__['graph'] = rsandbox.get_sandbox_stub( + obj, db.translator.rtyper) + obj.__dict__.pop('_safe_not_sandboxed', None) + obj.__dict__.pop('external', None) + if forcename: name = forcename else: name = _select_name(db, obj) return FuncNode(db, T, obj, name) +def need_sandboxing(fnobj): + if hasattr(fnobj, '_safe_not_sandboxed'): + return not fnobj._safe_not_sandboxed + else: + return "if_external" + def select_function_code_generators(fnobj, db, functionname): if hasattr(fnobj, 'graph'): exception_policy = getattr(fnobj, 'exception_policy', None) diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -117,11 +117,13 @@ dump_string = rmarshal.get_marshaller(str) load_int = rmarshal.get_loader(int) -def get_sandbox_stub(fnobj): - """Build always-raising stub function for unsupported external function.""" +def get_sandbox_stub(fnobj, rtyper): + """Build always-raising graph for unsupported external function.""" fnname = fnobj._name + args_s, s_result = sig_ll(fnobj) msg = "Not implemented: sandboxing for external function '%s'" % (fnname,) - return make_stub(fnname, msg) + execute = make_stub(fnname, msg) + return _annotate(rtyper, execute, args_s, s_result) def get_external_function_sandbox_graph(fnobj, rtyper): """Build the graph of a helper trampoline function to be used diff --git a/rpython/translator/sandbox/test/test_sandbox.py b/rpython/translator/sandbox/test/test_sandbox.py --- a/rpython/translator/sandbox/test/test_sandbox.py +++ b/rpython/translator/sandbox/test/test_sandbox.py @@ -8,7 +8,6 @@ from rpython.translator.interactive import Translation from rpython.translator.sandbox.sandlib import read_message, write_message from rpython.translator.sandbox.sandlib import write_exception -from rpython.translator.tool.cbuild import ExternalCompilationInfo if hasattr(signal, 'alarm'): _orig_read_message = read_message @@ -293,16 +292,6 @@ rescode = pipe.wait() assert rescode == 0 -def test_llexternal(): - c_foo = rffi.llexternal('foo', [], rffi.INT) - def f(argv): - try: - c_foo() - except: - pass - return 0 - compile(f) # Check that this doesn't crash - class TestPrintedResults: def run(self, entry_point, args, expected): From pypy.commits at gmail.com Tue Jan 19 14:04:37 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Jan 2016 11:04:37 -0800 (PST) Subject: [pypy-commit] pypy exctrans: cleanup Message-ID: <569e88c5.8f7e1c0a.d8754.ffff9254@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81860:068216680608 Date: 2016-01-19 19:03 +0000 http://bitbucket.org/pypy/pypy/changeset/068216680608/ Log: cleanup diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -800,7 +800,8 @@ comma = '' expr += comma i = expr.find('\n') - if i<0: i = len(expr) + if i < 0: + i = len(expr) expr = '%s\t/* %s */%s' % (expr[:i], decoration, expr[i:]) return expr.split('\n') @@ -871,7 +872,7 @@ while start < len(localnames): # pack the local declarations over as few lines as possible total = lengths[start] + 8 - end = start+1 + end = start + 1 while total + lengths[end] < 77: total += lengths[end] + 1 end += 1 diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -35,7 +35,8 @@ sandboxsafe=True) - at signature(types.int(), types.ptr(rffi.CCHARP.TO), types.int(), returns=types.none()) + at signature(types.int(), types.ptr(rffi.CCHARP.TO), types.int(), + returns=types.none()) def writeall_not_sandboxed(fd, buf, length): while length > 0: size = rffi.cast(rffi.SIZE_T, length) @@ -83,15 +84,24 @@ return loader def reraise_error(error, loader): - if error == 1: raise OSError(load_int(loader), "external error") - elif error == 2: raise IOError - elif error == 3: raise OverflowError - elif error == 4: raise ValueError - elif error == 5: raise ZeroDivisionError - elif error == 6: raise MemoryError - elif error == 7: raise KeyError - elif error == 8: raise IndexError - else: raise RuntimeError + if error == 1: + raise OSError(load_int(loader), "external error") + elif error == 2: + raise IOError + elif error == 3: + raise OverflowError + elif error == 4: + raise ValueError + elif error == 5: + raise ZeroDivisionError + elif error == 6: + raise MemoryError + elif error == 7: + raise KeyError + elif error == 8: + raise IndexError + else: + raise RuntimeError @signature(types.str(), returns=types.impossible()) @@ -102,7 +112,9 @@ raise RuntimeError(msg) # XXX in RPython, the msg is ignored def make_stub(fnname, msg): + """Build always-raising stub function to replace unsupported external.""" log.WARNING(msg) + def execute(*args): not_implemented_stub(msg) execute.__name__ = 'sandboxed_%s' % (fnname,) @@ -118,31 +130,12 @@ load_int = rmarshal.get_loader(int) def get_sandbox_stub(fnobj, rtyper): - """Build always-raising graph for unsupported external function.""" fnname = fnobj._name args_s, s_result = sig_ll(fnobj) msg = "Not implemented: sandboxing for external function '%s'" % (fnname,) execute = make_stub(fnname, msg) return _annotate(rtyper, execute, args_s, s_result) -def get_external_function_sandbox_graph(fnobj, rtyper): - """Build the graph of a helper trampoline function to be used - in place of real calls to the external function 'fnobj'. The - trampoline marshals its input arguments, dumps them to STDOUT, - and waits for an answer on STDIN. - """ - fnname = fnobj._name - if hasattr(fnobj, 'graph'): - graph = fnobj.graph - args_s = [v.annotation for v in graph.getargs()] - s_result = graph.getreturnvar().annotation - else: - # pure external function - fall back to the annotations - # corresponding to the ll types - args_s, s_result = sig_ll(fnobj) - execute = make_sandbox_trampoline(fnname, args_s, s_result) - return _annotate(rtyper, execute, args_s, s_result) - def make_sandbox_trampoline(fnname, args_s, s_result): """Create a trampoline function with the specified signature. From pypy.commits at gmail.com Tue Jan 19 14:06:01 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Jan 2016 11:06:01 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: merge default Message-ID: <569e8919.625dc20a.abb5f.02b3@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81861:bde790a86620 Date: 2016-01-15 20:56 +0100 http://bitbucket.org/pypy/pypy/changeset/bde790a86620/ Log: merge default (superficial merge, will need fixes to integrate the two approaches) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -30,7 +30,7 @@ jump(..., descr=...) """) - def test_load_attr(self): + def test_load_immutable_attr(self): src = ''' class A(object): pass diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -114,13 +114,16 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - guard_not_invalidated(descr=...) i12 = int_is_true(i4) guard_true(i12, descr=...) - i14 = int_add_ovf(i13, 2) + guard_not_invalidated(descr=...) + i13 = int_add_ovf(i8, i9) guard_no_overflow(descr=...) - i13 = int_add_ovf(i14, 2) + i10 = int_mul_ovf(2, i61) guard_no_overflow(descr=...) + i14 = int_add_ovf(i13, i10) + guard_no_overflow(descr=...) + setfield_gc(p7, i11, descr=...) i17 = int_sub_ovf(i4, 1) guard_no_overflow(descr=...) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -41,11 +41,13 @@ assert round(log.result, 6) == round(main(500), 6) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - guard_not_invalidated(descr=...) i53 = int_lt(i48, i27) guard_true(i53, descr=...) - i54 = int_add(i48, 1) + guard_not_invalidated(descr=...) + i54 = int_add_ovf(i48, i47) + guard_no_overflow(descr=...) --TICK-- + i58 = arraylen_gc(p43, descr=...) jump(..., descr=...) """) diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -10,17 +10,13 @@ DictStrategy, ObjectDictStrategy, _never_equal_to_string, create_iterator_classes) from pypy.objspace.std.typeobject import ( - MutableCell, IntMutableCell, ObjectMutableCell, write_cell) + MutableCell, IntMutableCell, ObjectMutableCell, write_cell, + unwrap_cell) class VersionTag(object): pass -def unwrap_cell(space, w_value): - if isinstance(w_value, MutableCell): - return w_value.unwrap_cell(space) - return w_value - def _wrapkey(space, key): return space.wrap(key) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -4,7 +4,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.objspace.std.dictmultiobject import ( DictStrategy, create_iterator_classes) -from pypy.objspace.std.typeobject import unwrap_cell +from pypy.objspace.std.typeobject import unwrap_cell_iftypeversion class DictProxyStrategy(DictStrategy): @@ -84,11 +84,12 @@ return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): - return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] + return [unwrap_cell_iftypeversion(self.space, w_value) + for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] def items(self, w_dict): space = self.space - return [space.newtuple([space.wrap(key), unwrap_cell(self.space, w_value)]) + return [space.newtuple([space.wrap(key), unwrap_cell_iftypeversion(self.space, w_value)]) for (key, w_value) in self.unerase(w_dict.dstorage).dict_w.iteritems()] def clear(self, w_dict): @@ -109,6 +110,6 @@ def wrapkey(space, key): return space.wrap(key) def wrapvalue(space, value): - return unwrap_cell(space, value) + return unwrap_cell_iftypeversion(space, value) create_iterator_classes(DictProxyStrategy) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -10,8 +10,9 @@ BaseValueIterator, BaseItemIterator, _never_equal_to_string, W_DictObject, ) -from pypy.objspace.std.typeobject import MutableCell - +from pypy.objspace.std.typeobject import ( + MutableCell, IntMutableCell, FloatMutableCell, ObjectMutableCell, + unwrap_cell) # ____________________________________________________________ # attribute shapes @@ -45,20 +46,16 @@ if w_res is not None: return w_res if ( - jit.isconstant(attr.storageindex) and + jit.isconstant(attr) and jit.isconstant(obj) and not attr.ever_mutated ): - return self._pure_mapdict_read_storage(obj, attr.storageindex) + return attr._pure_read(obj) else: - w_res = obj._mapdict_read_storage(attr.storageindex) + result = obj._mapdict_read_storage(attr.storageindex) if jit.we_are_jitted() and attr.class_is_known(): - jit.record_exact_class(w_res, attr.read_constant_cls()) - return w_res - - @jit.elidable - def _pure_mapdict_read_storage(self, obj, storageindex): - return obj._mapdict_read_storage(storageindex) + jit.record_exact_class(result, attr.read_constant_cls()) + return attr._read_cell(result) def write(self, obj, selector, w_value): attr = self.find_map_attr(selector) @@ -70,7 +67,9 @@ # if this path is taken, the storage is already filled from the time we # did the map transition. Therefore, if the value profiler says so, we # can not do the write - if not write_unnecessary: + cell = obj._mapdict_read_storage(attr.storageindex) + w_value = attr._write_cell(cell, w_value) + if write_unnecessary and w_value is not None: obj._mapdict_write_storage(attr.storageindex, w_value) return True @@ -172,6 +171,7 @@ def add_attr(self, obj, selector, w_value): # grumble, jit needs this attr = self._get_new_attr(selector[0], selector[1]) + w_value = attr._write_cell(None, w_value) oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): size_est = (oldattr._size_estimate + attr.size_estimate() @@ -188,6 +188,8 @@ # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses obj._set_mapdict_map(attr) + w_value = attr._write_cell(None, w_value) + assert w_value is not None obj._mapdict_write_storage(attr.storageindex, w_value) attr.see_write(w_value) @@ -296,7 +298,8 @@ class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'storageindex', 'back', 'ever_mutated?'] + _immutable_fields_ = ['selector', 'storageindex', 'back', + 'ever_mutated?', 'can_contain_mutable_cell?'] objectmodel.import_from_mixin(valueprof.ValueProf) def __init__(self, selector, back): @@ -307,6 +310,18 @@ self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False self.init_valueprof('%s.%s' % (back.terminator.w_cls.name if back.terminator.w_cls else '???', selector[0])) + # this flag means: at some point there was an instance that used a + # derivative of this map that had a MutableCell stored into the + # corresponding field. + # if the flag is False, we don't need to unbox the attribute. + self.can_contain_mutable_cell = False + + @jit.elidable + def _pure_read(self, obj): + # this is safe even if the mapdict stores a mutable cell. the cell can + # only be changed is ever_mutated is set to True + result = obj._mapdict_read_storage(self.storageindex) + return self._read_cell(result) # ____________________________________________________________ # methods for ValueProf mixin @@ -320,6 +335,37 @@ return w_obj.intval # ____________________________________________________________ + def _read_cell(self, w_cell): + if not self.can_contain_mutable_cell: + return w_cell + return unwrap_cell(self.space, w_cell) + + def _write_cell(self, w_cell, w_value): + from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.floatobject import W_FloatObject + assert not isinstance(w_cell, ObjectMutableCell) + if type(w_value) is W_IntObject: + if isinstance(w_cell, IntMutableCell): + w_cell.intvalue = w_value.intval + return None + check = self._ensure_can_contain_mutable_cell() + assert check + return IntMutableCell(w_value.intval) + if type(w_value) is W_FloatObject: + if isinstance(w_cell, FloatMutableCell): + w_cell.floatvalue = w_value.floatval + return None + check = self._ensure_can_contain_mutable_cell() + assert check + return FloatMutableCell(w_value.floatval) + return w_value + + @jit.elidable + def _ensure_can_contain_mutable_cell(self): + if not self.can_contain_mutable_cell: + self.can_contain_mutable_cell = True + return True + def _copy_attr(self, obj, new_obj): w_value = self.read(obj, self.selector) new_obj._get_mapdict_map().add_attr(new_obj, self.selector, w_value) @@ -357,7 +403,8 @@ new_obj = self.back.materialize_r_dict(space, obj, dict_w) if self.selector[1] == DICT: w_attr = space.wrap(self.selector[0]) - dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) + dict_w[w_attr] = unwrap_cell( + space, obj._mapdict_read_storage(self.storageindex)) else: self._copy_attr(obj, new_obj) return new_obj @@ -898,7 +945,8 @@ map = w_obj._get_mapdict_map() if entry.is_valid_for_map(map) and entry.w_method is None: # everything matches, it's incredibly fast - return w_obj._mapdict_read_storage(entry.storageindex) + return unwrap_cell( + map.space, w_obj._mapdict_read_storage(entry.storageindex)) return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) LOAD_ATTR_caching._always_inline_ = True @@ -943,7 +991,8 @@ # Note that if map.terminator is a DevolvedDictTerminator, # map.find_map_attr will always return None if selector[1]==DICT. _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) - return w_obj._mapdict_read_storage(attr.storageindex) + return unwrap_cell( + space, w_obj._mapdict_read_storage(attr.storageindex)) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 return space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -109,23 +109,27 @@ assert obj2.map is obj.map def test_attr_immutability(monkeypatch): + from pypy.objspace.std.intobject import W_IntObject cls = Class() obj = cls.instantiate() - obj.setdictvalue(space, "a", 10) - obj.setdictvalue(space, "b", 20) - obj.setdictvalue(space, "b", 30) - assert obj.storage == [10, 30] + obj.setdictvalue(space, "a", W_IntObject(10)) + obj.setdictvalue(space, "b", W_IntObject(20)) + obj.setdictvalue(space, "b", W_IntObject(30)) + mutcella, mutcellb = obj.storage + assert mutcella.intvalue == 10 + assert mutcellb.intvalue == 30 assert obj.map.ever_mutated == True assert obj.map.back.ever_mutated == False indices = [] + orig_pure_read = PlainAttribute._pure_read - def _pure_mapdict_read_storage(obj, storageindex): - assert storageindex == 0 - indices.append(storageindex) - return obj._mapdict_read_storage(storageindex) + def _pure_read(self, obj): + assert self.storageindex == 0 + indices.append(self.storageindex) + return orig_pure_read(self, obj) - obj.map._pure_mapdict_read_storage = _pure_mapdict_read_storage + monkeypatch.setattr(PlainAttribute, "_pure_read", _pure_read) monkeypatch.setattr(jit, "isconstant", lambda c: True) assert obj.getdictvalue(space, "a") == 10 @@ -134,16 +138,20 @@ assert indices == [0, 0] obj2 = cls.instantiate() - obj2.setdictvalue(space, "a", 15) - obj2.setdictvalue(space, "b", 25) + obj2.setdictvalue(space, "a", W_IntObject(15)) + obj2.setdictvalue(space, "b", W_IntObject(25)) + mutcella, mutcellb = obj2.storage assert obj2.map is obj.map assert obj2.map.ever_mutated == True assert obj2.map.back.ever_mutated == False # mutating obj2 changes the map - obj2.setdictvalue(space, "a", 50) + obj2.setdictvalue(space, "a", W_IntObject(50)) assert obj2.map.back.ever_mutated == True assert obj2.map is obj.map + assert obj2.storage[0] is mutcella + assert obj2.storage[1] is mutcellb + def test_attr_immutability_delete(): cls = Class() @@ -155,6 +163,94 @@ assert obj.map.ever_mutated == True assert obj.map is map1 +def test_immutable_with_mutcell(): + # even an immutable attribute will be stored as a mutcell. The reason is + # that then the type of the attribute is more predictable (eg always + # IntMutableCell and sometimes IntMutableCell and sometimes W_IntObject) + from pypy.objspace.std.intobject import W_IntObject + cls = Class() + obj = cls.instantiate() + # make sure the attribute counts as mutable + obj.setdictvalue(space, "a", W_IntObject(4)) + # not wrapped because of the FakeSpace :-( + assert obj.getdictvalue(space, "a") == 4 + mutcell = obj._mapdict_read_storage(0) + assert mutcell.intvalue == 4 + + +def test_mutcell_not_immutable(): + from pypy.objspace.std.intobject import W_IntObject + cls = Class() + obj = cls.instantiate() + # make sure the attribute counts as mutable + obj.setdictvalue(space, "a", W_IntObject(4)) + obj.setdictvalue(space, "a", W_IntObject(5)) + assert obj.map.ever_mutated + + obj = cls.instantiate() + obj.setdictvalue(space, "a", W_IntObject(5)) + # not wrapped because of the FakeSpace :-( + assert obj.getdictvalue(space, "a") == 5 + mutcell = obj._mapdict_read_storage(0) + assert mutcell.intvalue == 5 + + obj.setdictvalue(space, "a", W_IntObject(6)) + assert obj.getdictvalue(space, "a") == 6 # FakeSpace again + mutcell1 = obj._mapdict_read_storage(0) + assert mutcell1.intvalue == 6 + assert mutcell is mutcell1 + + obj.setdictvalue(space, "a", W_IntObject(7)) + assert obj.getdictvalue(space, "a") == 7 # FakeSpace again + mutcell2 = obj._mapdict_read_storage(0) + assert mutcell2.intvalue == 7 + assert mutcell2 is mutcell1 + + +def test_mutcell_not_immutable_float(): + from pypy.objspace.std.floatobject import W_FloatObject + cls = Class() + obj = cls.instantiate() + # make sure the attribute counts as mutable + obj.setdictvalue(space, "a", W_FloatObject(4.43)) + obj.setdictvalue(space, "a", W_FloatObject(5.43)) + assert obj.map.ever_mutated + + obj = cls.instantiate() + obj.setdictvalue(space, "a", W_FloatObject(5.43)) + assert obj.getdictvalue(space, "a") == 5.43 + mutcell = obj._mapdict_read_storage(0) + assert mutcell.floatvalue == 5.43 + + obj.setdictvalue(space, "a", W_FloatObject(6.43)) + assert obj.getdictvalue(space, "a") == 6.43 + mutcell1 = obj._mapdict_read_storage(0) + assert mutcell1.floatvalue == 6.43 + assert mutcell is mutcell1 + + obj.setdictvalue(space, "a", W_FloatObject(7.43)) + assert obj.getdictvalue(space, "a") == 7.43 + mutcell2 = obj._mapdict_read_storage(0) + assert mutcell2.floatvalue == 7.43 + assert mutcell2 is mutcell1 + + +def test_mutcell_unwrap_only_if_needed(): + from pypy.objspace.std.intobject import W_IntObject + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", "foo") + assert not obj._get_mapdict_map().can_contain_mutable_cell + obj.setdictvalue(space, "a", W_IntObject(6)) + obj.setdictvalue(space, "a", W_IntObject(6)) + assert obj._get_mapdict_map().can_contain_mutable_cell + + obj._get_mapdict_map().can_contain_mutable_cell = False + mutcell = IntMutableCell(1) + obj._mapdict_write_storage(0, mutcell) + assert obj.getdictvalue(space, "a") is mutcell # not unwrapped + + def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): c = Class() diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -259,6 +259,43 @@ cell = w_A._getdictvalue_no_unwrapping(space, "x") assert space.float_w(cell.w_value) == 2.2 + def test_float_cells(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newfloat(1.1)) + assert w_A.version_tag() is not atag + assert space.float_w(space.getattr(w_A, w_x)) == 1.1 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newfloat(2.1)) + assert w_A.version_tag() is not atag + assert space.float_w(space.getattr(w_A, w_x)) == 2.1 + cell = w_A._getdictvalue_no_unwrapping(space, "x") + assert cell.floatvalue == 2.1 + + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newfloat(3.1)) + assert w_A.version_tag() is atag + assert space.float_w(space.getattr(w_A, w_x)) == 3.1 + assert cell.floatvalue == 3.1 + + space.setattr(w_A, w_x, space.newfloat(4.1)) + assert w_A.version_tag() is atag + assert space.float_w(space.getattr(w_A, w_x)) == 4.1 + assert cell.floatvalue == 4.1 + + def test_float_cell_turns_into_cell(self): + space = self.space + w_x = space.wrap("x") + w_A, w_B, w_C = self.get_three_classes() + atag = w_A.version_tag() + space.setattr(w_A, w_x, space.newfloat(1.1)) + space.setattr(w_A, w_x, space.newfloat(2.1)) + space.setattr(w_A, w_x, space.wrap("abc")) + cell = w_A._getdictvalue_no_unwrapping(space, "x") + assert space.str_w(cell.w_value) == "abc" class AppTestVersionedType(test_typeobject.AppTestTypeObject): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -36,15 +36,31 @@ def __repr__(self): return "" % (self.intvalue, ) +class FloatMutableCell(MutableCell): + def __init__(self, floatvalue): + self.floatvalue = floatvalue + + def unwrap_cell(self, space): + return space.wrap(self.floatvalue) + + def __repr__(self): + return "" % (self.floatvalue, ) + def unwrap_cell(space, w_value): + if isinstance(w_value, MutableCell): + return w_value.unwrap_cell(space) + return w_value + + +def unwrap_cell_iftypeversion(space, w_value): if space.config.objspace.std.withtypeversion: - if isinstance(w_value, MutableCell): - return w_value.unwrap_cell(space) + return unwrap_cell(space, w_value) return w_value def write_cell(space, w_cell, w_value): from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.floatobject import W_FloatObject if w_cell is None: # attribute does not exist at all, write it without a cell first return w_value @@ -54,14 +70,19 @@ elif isinstance(w_cell, IntMutableCell) and type(w_value) is W_IntObject: w_cell.intvalue = w_value.intval return None + elif isinstance(w_cell, FloatMutableCell) and type(w_value) is W_FloatObject: + w_cell.floatvalue = w_value.floatval + return None elif space.is_w(w_cell, w_value): # If the new value and the current value are the same, don't # create a level of indirection, or mutate the version. return None - if type(w_value) is W_IntObject: - return IntMutableCell(w_value.intval) - else: - return ObjectMutableCell(w_value) + if not isinstance(w_cell, MutableCell): + if type(w_value) is W_IntObject: + return IntMutableCell(w_value.intval) + if type(w_value) is W_FloatObject: + return FloatMutableCell(w_value.floatval) + return ObjectMutableCell(w_value) class VersionTag(object): pass @@ -274,12 +295,12 @@ if space.config.objspace.std.withtypeversion: version_tag = w_self.version_tag() if version_tag is not None: - return unwrap_cell( + return unwrap_cell_iftypeversion( space, w_self._pure_getdictvalue_no_unwrapping( space, version_tag, attr)) w_value = w_self._getdictvalue_no_unwrapping(space, attr) - return unwrap_cell(space, w_value) + return unwrap_cell_iftypeversion(space, w_value) def _getdictvalue_no_unwrapping(w_self, space, attr): w_value = w_self.dict_w.get(attr, None) From pypy.commits at gmail.com Tue Jan 19 14:06:02 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Jan 2016 11:06:02 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: integrate value-profiling and type cells better Message-ID: <569e891a.878e1c0a.5ceee.ffff9497@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81862:19c895bc1e0d Date: 2016-01-15 21:29 +0100 http://bitbucket.org/pypy/pypy/changeset/19c895bc1e0d/ Log: integrate value-profiling and type cells better diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -64,12 +64,14 @@ write_unnecessary = attr.see_write(w_value) if not attr.ever_mutated: attr.ever_mutated = True - # if this path is taken, the storage is already filled from the time we + # if write_unnecessary, the storage is already filled from the time we # did the map transition. Therefore, if the value profiler says so, we # can not do the write + if write_unnecessary: + return True cell = obj._mapdict_read_storage(attr.storageindex) w_value = attr._write_cell(cell, w_value) - if write_unnecessary and w_value is not None: + if w_value is not None: obj._mapdict_write_storage(attr.storageindex, w_value) return True diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -13,6 +13,13 @@ space = FakeSpace() space.config = Config +class Value: + def __init__(self, val=None): + self.val = val + + def __repr__(self): + return "Value(%r)" % self.val + class Class(object): name = 'testing' def __init__(self, hasdict=True): @@ -35,31 +42,45 @@ def test_plain_attribute(): w_cls = None - aa = PlainAttribute(("b", DICT), - PlainAttribute(("a", DICT), - Terminator(space, w_cls))) - assert aa.space is space - assert aa.terminator.w_cls is w_cls - assert aa.get_terminator() is aa.terminator + terminator = Terminator(space, w_cls) + amap = terminator._get_new_attr("a", DICT) + bmap = amap._get_new_attr("b", DICT) + assert bmap.space is space + assert bmap.terminator.w_cls is w_cls + assert bmap.get_terminator() is bmap.terminator + assert bmap.length() == 2 + assert bmap.get_terminator() is bmap.back.back + + v10 = Value(10) + v20 = Value(20) + v30 = Value(30) + v40 = Value(40) + v50 = Value(50) + v60 = Value(60) obj = Object() - obj.map, obj.storage = aa, [10, 20] - assert obj.getdictvalue(space, "a") == 10 - assert obj.getdictvalue(space, "b") == 20 + obj.map = terminator + obj.storage = [None, None] + obj.setdictvalue(space, "a", v10) + obj.setdictvalue(space, "b", v20) + assert obj.map is bmap + assert obj.storage == [v10, v20] + assert obj.getdictvalue(space, "a") == v10 + assert obj.getdictvalue(space, "b") == v20 assert obj.getdictvalue(space, "c") is None obj = Object() - obj.map, obj.storage = aa, [30, 40] - obj.setdictvalue(space, "a", 50) - assert obj.storage == [50, 40] - assert obj.getdictvalue(space, "a") == 50 - obj.setdictvalue(space, "b", 60) - assert obj.storage == [50, 60] - assert obj.getdictvalue(space, "b") == 60 + obj.map = terminator + obj.storage = [None, None] + obj.setdictvalue(space, "a", v30) + obj.setdictvalue(space, "b", v40) + obj.setdictvalue(space, "a", v50) + assert obj.storage == [v50, v40] + assert obj.getdictvalue(space, "a") == v50 + obj.setdictvalue(space, "b", v60) + assert obj.storage == [v50, v60] + assert obj.getdictvalue(space, "b") == v60 - assert aa.length() == 2 - - assert aa.get_terminator() is aa.back.back def test_huge_chain(): current = Terminator(space, None) @@ -447,8 +468,6 @@ def test_value_profiling(monkeypatch): monkeypatch.setattr(jit, "we_are_jitted", lambda : True) - class Value: - pass a = Value() cls = Class() obj = cls.instantiate() From pypy.commits at gmail.com Tue Jan 19 14:06:04 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Jan 2016 11:06:04 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: proper integration between a field being known an instance of W_IntObject or Message-ID: <569e891c.c8b3c20a.e79c4.ffffa1bb@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81863:703d8c63a5f6 Date: 2016-01-16 07:20 +0100 http://bitbucket.org/pypy/pypy/changeset/703d8c63a5f6/ Log: proper integration between a field being known an instance of W_IntObject or W_FloatObject diff --git a/pypy/interpreter/valueprof.py b/pypy/interpreter/valueprof.py --- a/pypy/interpreter/valueprof.py +++ b/pypy/interpreter/valueprof.py @@ -28,6 +28,22 @@ def get_int_val(self, w_obj): raise NotImplementedError("abstract base") + def write_necessary(self, w_value): + status = self._vprof_status + if status == SEEN_TOO_MUCH: + return True + # we must have seen something already, because it only makes sense to + # call write_necessary if there is already a value there + assert not status == SEEN_NOTHING + if status == SEEN_CONSTANT_INT: + return (self.is_int(w_value) and + self.read_constant_int() != self.get_int_val(w_value)) + elif status == SEEN_CONSTANT_OBJ: + prev_obj = self.try_read_constant_obj() + return prev_obj is not w_value + return True + + def see_write(self, w_value): """ inform the value profiler of a write. returns False, unless the value is known to be a constant, and w_value that constant (in that diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -34,6 +34,7 @@ def read(self, obj, selector): from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.floatobject import W_FloatObject attr = self.find_map_attr(selector) if attr is None: return self.terminator._read_terminator(obj, selector) @@ -54,21 +55,30 @@ else: result = obj._mapdict_read_storage(attr.storageindex) if jit.we_are_jitted() and attr.class_is_known(): - jit.record_exact_class(result, attr.read_constant_cls()) + cls = attr.read_constant_cls() + if cls is W_IntObject: + # this means that the class stored in the storage is an + # IntMutableCell + return W_IntObject(result.intvalue) + if cls is W_FloatObject: + # ditto + return W_FloatObject(result.floatvalue) + jit.record_exact_class(result, cls) return attr._read_cell(result) def write(self, obj, selector, w_value): attr = self.find_map_attr(selector) if attr is None: return self.terminator._write_terminator(obj, selector, w_value) - write_unnecessary = attr.see_write(w_value) + # if the write is not necessary, the storage is already filled from the + # time we did the map transition. Therefore, if the value profiler says + # so, we can not do the write + write_necessary = attr.write_necessary(w_value) + if not write_necessary: + return True if not attr.ever_mutated: attr.ever_mutated = True - # if write_unnecessary, the storage is already filled from the time we - # did the map transition. Therefore, if the value profiler says so, we - # can not do the write - if write_unnecessary: - return True + self.see_write(w_value) cell = obj._mapdict_read_storage(attr.storageindex) w_value = attr._write_cell(cell, w_value) if w_value is not None: @@ -173,7 +183,6 @@ def add_attr(self, obj, selector, w_value): # grumble, jit needs this attr = self._get_new_attr(selector[0], selector[1]) - w_value = attr._write_cell(None, w_value) oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): size_est = (oldattr._size_estimate + attr.size_estimate() @@ -190,10 +199,11 @@ # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses obj._set_mapdict_map(attr) + # important to see the write of the original value, not the cell + attr.see_write(w_value) w_value = attr._write_cell(None, w_value) assert w_value is not None obj._mapdict_write_storage(attr.storageindex, w_value) - attr.see_write(w_value) def materialize_r_dict(self, space, obj, dict_w): raise NotImplementedError("abstract base class") diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -512,6 +512,29 @@ assert obj.getdictvalue(space, "a") == a assert seen == [(a, Value), (a, Value)] +def test_value_profiling_known_cls_mutcells(monkeypatch): + from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.floatobject import W_FloatObject + for cls, mutcls, val, attrname in [ + (W_IntObject, IntMutableCell, 0, "intval"), + (W_FloatObject, FloatMutableCell, 0.0, "floatval")]: + a = cls(val) + a1 = cls(val + 1) + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", a1) + obj = cls.instantiate() + obj.setdictvalue(space, "a", a1) + obj.setdictvalue(space, "a", a) + + def f(obj, cls): + assert False, "unreachable" + monkeypatch.setattr(jit, "we_are_jitted", lambda : True) + monkeypatch.setattr(jit, "record_exact_class", f) + + assert getattr(obj.getdictvalue(space, "a"), attrname) == val + assert getattr(obj.getdictvalue(space, "a"), attrname) == val + def test_value_profiling_elide_write(monkeypatch): monkeypatch.setattr(jit, "we_are_jitted", lambda : True) From pypy.commits at gmail.com Tue Jan 19 14:06:06 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Jan 2016 11:06:06 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: do the same thing for write Message-ID: <569e891e.0f811c0a.ec52f.ffff8f93@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81864:6e1f9f4ed3fa Date: 2016-01-16 07:47 +0100 http://bitbucket.org/pypy/pypy/changeset/6e1f9f4ed3fa/ Log: do the same thing for write diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -59,14 +59,18 @@ if cls is W_IntObject: # this means that the class stored in the storage is an # IntMutableCell + assert isinstance(result, IntMutableCell) return W_IntObject(result.intvalue) if cls is W_FloatObject: # ditto + assert isinstance(result, FloatMutableCell) return W_FloatObject(result.floatvalue) jit.record_exact_class(result, cls) return attr._read_cell(result) def write(self, obj, selector, w_value): + from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.floatobject import W_FloatObject attr = self.find_map_attr(selector) if attr is None: return self.terminator._write_terminator(obj, selector, w_value) @@ -80,6 +84,21 @@ attr.ever_mutated = True self.see_write(w_value) cell = obj._mapdict_read_storage(attr.storageindex) + if jit.we_are_jitted() and attr.class_is_known(): + cls = attr.read_constant_cls() + if cls is W_IntObject: + # this means that the class stored in the storage is an + # IntMutableCell + assert isinstance(cell, IntMutableCell) + assert isinstance(w_value, W_IntObject) + cell.intvalue = w_value.intval + return True + if cls is W_FloatObject: + # ditto + assert isinstance(cell, FloatMutableCell) + assert isinstance(w_value, W_FloatObject) + cell.floatvalue = w_value.floatval + return True w_value = attr._write_cell(cell, w_value) if w_value is not None: obj._mapdict_write_storage(attr.storageindex, w_value) diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -535,6 +535,10 @@ assert getattr(obj.getdictvalue(space, "a"), attrname) == val assert getattr(obj.getdictvalue(space, "a"), attrname) == val + monkeypatch.setattr(obj.map, "_write_cell", None) # not needed for the next write + obj.setdictvalue(space, "a", a1) + assert getattr(obj.getdictvalue(space, "a"), attrname) == val + 1 + def test_value_profiling_elide_write(monkeypatch): monkeypatch.setattr(jit, "we_are_jitted", lambda : True) From pypy.commits at gmail.com Tue Jan 19 14:06:08 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Jan 2016 11:06:08 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: merge default Message-ID: <569e8920.84e31c0a.70bdc.ffff92a8@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81865:a157c98e9ce3 Date: 2016-01-19 19:23 +0100 http://bitbucket.org/pypy/pypy/changeset/a157c98e9ce3/ Log: merge default diff too long, truncating to 2000 out of 3194 lines diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -685,13 +685,17 @@ # the previous version of this code did. This should work for # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - so_ext = _get_c_extension_suffix() + # ".pypy-VERSION.so" instead. Note a further tweak for cffi's + # embedding mode: if EXT_SUFFIX is also defined, use that + # directly. + so_ext = get_config_var('EXT_SUFFIX') if so_ext is None: - so_ext = get_config_var('SO') # fall-back - # extensions in debug_mode are named 'module_d.pyd' under windows - if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back + # extensions in debug_mode are named 'module_d.pyd' under windows + if os.name == 'nt' and self.debug: + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/test/capath/0e4015b9.0 b/lib-python/2.7/test/capath/0e4015b9.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/0e4015b9.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/capath/ce7b8643.0 b/lib-python/2.7/test/capath/ce7b8643.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/ce7b8643.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/https_svn_python_org_root.pem b/lib-python/2.7/test/https_svn_python_org_root.pem deleted file mode 100644 --- a/lib-python/2.7/test/https_svn_python_org_root.pem +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO -BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T -SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD ------END CERTIFICATE----- diff --git a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem --- a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem +++ b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG @@ -8,9 +8,9 @@ aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97 -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9 +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -57,7 +57,8 @@ SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") +REMOTE_HOST = "self-signed.pythontest.net" +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") @@ -244,7 +245,7 @@ self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): - with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) @@ -792,7 +793,7 @@ # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"): - ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) + ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) @@ -1013,7 +1014,7 @@ ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) @@ -1023,8 +1024,8 @@ # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) - # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + # but CAFILE_CACERT is a CA cert + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), @@ -1040,7 +1041,7 @@ (('emailAddress', 'support at cacert.org'),)), 'version': 3}]) - with open(SVN_PYTHON_ORG_ROOT_CERT) as f: + with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) @@ -1215,11 +1216,11 @@ class NetworkedTests(unittest.TestCase): def test_connect(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() @@ -1228,27 +1229,27 @@ s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) + self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443))) self.assertTrue(s.getpeercert()) finally: s.close() @@ -1256,14 +1257,14 @@ def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish @@ -1285,58 +1286,62 @@ def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: - self.skipTest("svn.python.org responded too quickly") + self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - rc = s.connect_ex(("svn.python.org", 444)) + rc = s.connect_ex((REMOTE_HOST, 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. - self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) + errors = ( + errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, + errno.EWOULDBLOCK, + ) + self.assertIn(rc, errors) finally: s.close() def test_connect_with_context(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), - server_hostname="svn.python.org") - s.connect(("svn.python.org", 443)) + server_hostname=REMOTE_HOST) + s.connect((REMOTE_HOST, 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # This should succeed because we specify the root cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(REMOTE_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1349,12 +1354,12 @@ # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1365,7 +1370,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1373,15 +1378,15 @@ s.close() def test_connect_cadata(self): - with open(CAFILE_CACERT) as f: + with open(REMOTE_ROOT_CERT) as f: pem = f.read().decode('ascii') der = ssl.PEM_cert_to_DER_cert(pem) - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1390,7 +1395,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1399,9 +1404,9 @@ # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) fd = ss.fileno() f = ss.makefile() f.close() @@ -1415,9 +1420,9 @@ self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = socket.socket(socket.AF_INET) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, @@ -1460,12 +1465,12 @@ if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) - _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) + _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): - remote = ("svn.python.org", 443) + remote = (REMOTE_HOST, 443) with support.transient_internet(remote[0]): with closing(ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s: @@ -1510,13 +1515,13 @@ def test_get_ca_certs_capath(self): # capath certs are loaded on request - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1527,12 +1532,12 @@ @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with closing(ctx1.wrap_socket(s)) as ss: - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 @@ -3026,7 +3031,7 @@ pass for filename in [ - CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, + CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.2 +Version: 1.5.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.5.0" +__version_info__ = (1, 5, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,8 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 #define _cffi_call_python \ - ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -206,7 +207,8 @@ /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org #endif diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -74,6 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -101,13 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed) + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -533,6 +542,31 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + if '__pypy__' in sys.builtin_module_names: + if hasattr(sys, 'prefix'): + import os + libdir = os.path.join(sys.prefix, 'bin') + dirs = kwds.setdefault('library_dirs', []) + if libdir not in dirs: + dirs.append(libdir) + pythonlib = "pypy-c" + else: + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + libraries = kwds.setdefault('libraries', []) + if pythonlib not in libraries: + libraries.append(pythonlib) + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " @@ -592,14 +626,23 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0): + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, + target=target, source_extension=source_extension, compiler_verbose=verbose, **kwds) def init_once(self, func, tag): @@ -626,6 +669,32 @@ self._init_once_cache[tag] = (True, result) return result + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -220,8 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._override = False - self._packed = False + self._options = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -281,16 +280,15 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): - prev_override = self._override - prev_packed = self._packed + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options try: - self._override = override - self._packed = packed + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} self._internal_parse(csource) finally: - self._override = prev_override - self._packed = prev_packed + self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -376,10 +374,13 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._inside_extern_python: - self._declare('extern_python ' + decl.name, tp) + if self._options['dllexport']: + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' else: - self._declare('function ' + decl.name, tp) + tag = 'function ' + self._declare(tag + decl.name, tp) def _parse_decl(self, decl): node = decl.type @@ -449,7 +450,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._override: + if not self._options['override']: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -728,7 +729,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._packed + tp.packed = self._options['packed'] if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, + target_extension, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +38,32 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _save_val(name): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + return config_vars.get(name, Ellipsis) + +def _restore_val(name, value): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + config_vars[name] = value + if value is Ellipsis: + del config_vars[name] + +def _win32_hack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): + MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ + MSVCCompiler._remove_visual_c_ref + MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file + +def _win32_unhack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + MSVCCompiler._remove_visual_c_ref = \ + MSVCCompiler._remove_visual_c_ref_CFFI_BAK + +def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -49,18 +76,29 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: + if sys.platform == 'win32' and embedding: + _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 + old_SO = _save_val('SO') + old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extension is not None: + _restore_val('SO', target_extension) + _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) + _restore_val('SO', old_SO) + _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) + if sys.platform == 'win32' and embedding: + _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -281,6 +282,29 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % + (self._string_literal(self.ffi._embedding),)) + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION + # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() @@ -365,17 +389,16 @@ prnt() # # the init function - base_module_name = self.module_name.split('.')[-1] prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') if self._num_externpy: prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python = ' + prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % VERSION) + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -394,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') @@ -1123,7 +1146,10 @@ assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) - def _generate_cpy_extern_python_decl(self, tp, name): + def _generate_cpy_dllexport_python_collecttype(self, tp, name): + self._generate_cpy_extern_python_collecttype(tp, name) + + def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1156,7 +1182,11 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + if dllexport: + tag = 'CFFI_DLLEXPORT' + else: + tag = 'static' + prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1174,6 +1204,9 @@ prnt() self._num_externpy += 1 + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: raise ffiplatform.VerificationError( @@ -1185,6 +1218,21 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + def _generate_cpy_dllexport_python_ctx(self, tp, name): + self._generate_cpy_extern_python_ctx(tp, name) + + def _string_literal(self, s): + def _char_repr(c): + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' + if c in '\\"?': return '\\' + c + if ' ' <= c < '\x7F': return c + if c == '\n': return '\\n' + return '\\%03o' % ord(c) + lines = [] + for line in s.splitlines(True): + lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) + return ' \\\n'.join(lines) + # ---------- # emitting the opcodes for individual types @@ -1311,12 +1359,15 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, **kwds): + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) @@ -1325,13 +1376,40 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + if target == '*': + target_module_name = module_name + target_extension = None # use default + else: + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + # split along the first '.' (not the last one, otherwise the + # preceeding dots are interpreted as splitting package names) + index = target.find('.') + if index < 0: + raise ValueError("target argument %r should be a file name " + "containing a '.'" % (target,)) + target_module_name = target[:index] + target_extension = target[index:] + # + ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose, + target_extension, + embedding=embedding) finally: os.chdir(cwd) return outputfilename diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -10,6 +10,15 @@ with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in recent versions of PyPy. +.. note:: + + The interface described in this page is kept for backward compatibility. + From PyPy 4.1, it is recommended to use instead CFFI's `native embedding + support,`__ which gives a simpler approach that works on CPython as well + as PyPy. + +.. __: http://cffi.readthedocs.org/en/latest/embedding.html + The resulting shared library exports very few functions, however they are enough to accomplish everything you need, provided you follow a few principles. The API is: diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -19,7 +19,9 @@ * Clone this new repo (i.e. the fork) to your local machine with the command ``hg clone ssh://hg at bitbucket.org/yourname/pypy``. It is a very slow - operation but only ever needs to be done once. If you already cloned + operation but only ever needs to be done once. See also + http://pypy.org/download.html#building-from-source . + If you already cloned ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, then you can reuse the same clone by editing the file ``.hg/hgrc`` in your clone to contain the line ``default = diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -67,8 +67,8 @@ **module** directory contains extension modules written in RPython * **rpython compiler** that resides in ``rpython/annotator`` and - ``rpython/rtyper`` directories. Consult :doc:`introduction to RPython ` for - further reading + ``rpython/rtyper`` directories. Consult `Getting Started with RPython`_ + for further reading * **JIT generator** lives in ``rpython/jit`` directory. optimizations live in ``rpython/jit/metainterp/optimizeopt``, the main JIT in @@ -80,3 +80,14 @@ The rest of directories serve specific niche goal and are unlikely a good entry point. + + +More documentation +------------------ + +* `Getting Started Developing With PyPy`_ + +* `Getting Started with RPython`_ + +.. _`Getting Started Developing With PyPy`: getting-started-dev.html +.. _`Getting started with RPython`: http://rpython.readthedocs.org/en/latest/getting-started.html diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -114,3 +114,12 @@ .. branch: globals-quasiimmut Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch: fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -84,13 +84,6 @@ from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype - w_pathsetter = space.appexec([], """(): - def f(path): - import sys - sys.path[:] = path - return f - """) - @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -109,7 +102,10 @@ " not found in '%s' or in any parent directory" % home1) return rffi.cast(rffi.INT, 1) space.startup() - space.call_function(w_pathsetter, w_path) + space.appexec([w_path], """(path): + import sys + sys.path[:] = path + """) # import site try: space.setattr(space.getbuiltinmodule('sys'), @@ -149,6 +145,9 @@ return os_thread.setup_threads(space) os_thread.bootstrapper.acquire(space, None, None) + # XXX this doesn't really work. Don't use os.fork(), and + # if your embedder program uses fork(), don't use any PyPy + # code in the fork rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1084,7 +1084,7 @@ def is_strdict(space, w_class): from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) + return space.wrap(isinstance(w_d.get_strategy(), BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,8 +1,9 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi +from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rtyper.lltypesystem import rffi -VERSION = "1.4.2" +VERSION = "1.5.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -65,6 +66,10 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def startup(self, space): + from pypy.module._cffi_backend import embedding + embedding.glob.space = space + def get_dict_rtld_constants(): found = {} @@ -78,3 +83,11 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value + + +# write this entrypoint() here, to make sure it is registered early enough + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') +def pypy_init_embedded_cffi_module(version, init_struct): + from pypy.module._cffi_backend import embedding + return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -9,18 +9,18 @@ VERSION_MIN = 0x2601 -VERSION_MAX = 0x26FF +VERSION_MAX = 0x27FF VERSION_EXPORT = 0x0A03 -initfunctype = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) +INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) def load_cffi1_module(space, name, path, initptr): # This is called from pypy.module.cpyext.api.load_extension_module() from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python - initfunc = rffi.cast(initfunctype, initptr) + initfunc = rffi.cast(INITFUNCPTR, initptr) with lltype.scoped_alloc(rffi.VOIDPP.TO, 16, zero=True) as p: p[0] = rffi.cast(rffi.VOIDP, VERSION_EXPORT) p[1] = rffi.cast(rffi.VOIDP, get_ll_cffi_call_python()) @@ -41,7 +41,8 @@ w_name = space.wrap(name) module = Module(space, w_name) - module.setdictvalue(space, '__file__', space.wrap(path)) + if path is not None: + module.setdictvalue(space, '__file__', space.wrap(path)) module.setdictvalue(space, 'ffi', space.wrap(ffi)) module.setdictvalue(space, 'lib', space.wrap(lib)) w_modules_dict = space.sys.get('modules') diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cffi_backend/embedding.py @@ -0,0 +1,146 @@ +import os +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import OperationError, oefmt + +# ____________________________________________________________ + + +EMBED_VERSION_MIN = 0xB011 +EMBED_VERSION_MAX = 0xB0FF + +STDERR = 2 +INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', + ('name', rffi.CCHARP), + ('func', rffi.VOIDP), + ('code', rffi.CCHARP))) + +def load_embedded_cffi_module(space, version, init_struct): + from pypy.module._cffi_backend.cffi1_module import load_cffi1_module + declare_c_function() # translation-time hint only: + # declare _cffi_carefully_make_gil() + # + version = rffi.cast(lltype.Signed, version) + if not (EMBED_VERSION_MIN <= version <= EMBED_VERSION_MAX): + raise oefmt(space.w_ImportError, + "cffi embedded module has got unknown version tag %s", + hex(version)) + # + if space.config.objspace.usemodules.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + # + name = rffi.charp2str(init_struct.name) + load_cffi1_module(space, name, None, init_struct.func) + code = rffi.charp2str(init_struct.code) + compiler = space.createcompiler() + pycode = compiler.compile(code, "" % name, 'exec', 0) + w_globals = space.newdict(module=True) + space.setitem_str(w_globals, "__builtins__", space.wrap(space.builtin)) + pycode.exec_code(space, w_globals, w_globals) + + +class Global: + pass +glob = Global() + +def pypy_init_embedded_cffi_module(version, init_struct): + # called from __init__.py + name = "?" + try: + init_struct = rffi.cast(INITSTRUCTPTR, init_struct) + name = rffi.charp2str(init_struct.name) + # + space = glob.space + must_leave = False + try: + must_leave = space.threadlocals.try_enter_thread(space) + load_embedded_cffi_module(space, version, init_struct) + res = 0 + except OperationError, operr: + operr.write_unraisable(space, "initialization of '%s'" % name, + with_traceback=True) + space.appexec([], r"""(): + import sys + sys.stderr.write('pypy version: %s.%s.%s\n' % + sys.pypy_version_info[:3]) + sys.stderr.write('sys.path: %r\n' % (sys.path,)) + """) + res = -1 + if must_leave: + space.threadlocals.leave_thread(space) + except Exception, e: + # oups! last-level attempt to recover. + try: + os.write(STDERR, "From initialization of '") + os.write(STDERR, name) + os.write(STDERR, "':\n") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + res = -1 + return rffi.cast(rffi.INT, res) + +# ____________________________________________________________ + + +eci = ExternalCompilationInfo(separate_module_sources=[ +r""" +/* XXX Windows missing */ +#include +#include +#include + +RPY_EXPORTED void rpython_startup_code(void); +RPY_EXPORTED int pypy_setup_home(char *, int); + +static unsigned char _cffi_ready = 0; +static const char *volatile _cffi_module_name; + +static void _cffi_init_error(const char *msg, const char *extra) +{ + fprintf(stderr, + "\nPyPy initialization failure when loading module '%s':\n%s%s\n", + _cffi_module_name, msg, extra); +} + +static void _cffi_init(void) +{ + Dl_info info; + char *home; + + rpython_startup_code(); + RPyGilAllocate(); + + if (dladdr(&_cffi_init, &info) == 0) { + _cffi_init_error("dladdr() failed: ", dlerror()); + return; + } + home = realpath(info.dli_fname, NULL); + if (pypy_setup_home(home, 1) != 0) { + _cffi_init_error("pypy_setup_home() failed", ""); + return; + } + _cffi_ready = 1; +} + +RPY_EXPORTED +int pypy_carefully_make_gil(const char *name) +{ + /* For CFFI: this initializes the GIL and loads the home path. + It can be called completely concurrently from unrelated threads. + It assumes that we don't hold the GIL before (if it exists), and we + don't hold it afterwards. + */ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + _cffi_module_name = name; /* not really thread-safe, but better than + nothing */ + pthread_once(&once_control, _cffi_init); + return (int)_cffi_ready - 1; +} +"""]) + +declare_c_function = rffi.llexternal_use_eci(eci) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.5.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 5): - py.test.skip("re-enable me in version 1.5") + if __version_info__ < (1, 6): + py.test.skip("re-enable me in version 1.6") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py @@ -54,3 +54,10 @@ content = open(p).read() #v = BACKEND_VERSIONS.get(v, v) assert (('assert __version__ == "%s"' % v) in content) + +def test_embedding_h(): + parent = os.path.dirname(os.path.dirname(cffi.__file__)) + v = cffi.__version__ + p = os.path.join(parent, 'cffi', '_embedding.h') + content = open(p).read() + assert ('cffi version: %s"' % (v,)) in content diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py @@ -1719,3 +1719,10 @@ exec("from _test_import_from_lib.lib import *", d) assert (set(key for key in d if not key.startswith('_')) == set(['myfunc', 'MYFOO'])) + # + # also test "import *" on the module itself, which should be + # equivalent to "import ffi, lib" + d = {} + exec("from _test_import_from_lib import *", d) + assert (sorted([x for x in d.keys() if not x.startswith('__')]) == + ['ffi', 'lib']) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py @@ -60,11 +60,16 @@ if (name.endswith('.so') or name.endswith('.pyd') or name.endswith('.dylib')): found_so = os.path.join(curdir, name) - # foo.cpython-34m.so => foo - name = name.split('.')[0] - # foo_d.so => foo (Python 2 debug builds) + # foo.so => foo + parts = name.split('.') + del parts[-1] + if len(parts) > 1 and parts[-1] != 'bar': + # foo.cpython-34m.so => foo, but foo.bar.so => foo.bar + del parts[-1] + name = '.'.join(parts) + # foo_d => foo (Python 2 debug builds) if name.endswith('_d') and hasattr(sys, 'gettotalrefcount'): - name = name.rsplit('_', 1)[0] + name = name[:-2] name += '.SO' if name.startswith('pycparser') and name.endswith('.egg'): continue # no clue why this shows up sometimes and not others @@ -209,6 +214,58 @@ 'Release': '?'}}) @chdir_to_tmp + def test_api_compile_explicit_target_1(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.*") + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.SO': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_2(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target=os.path.join("mod_name_in_package", "foo.bar.*")) + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_3(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.baz") + if sys.platform != 'win32': + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None, + 'mymod.o': None}}) + sofile = os.path.join(str(self.udir), 'foo.bar.baz') + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'foo.bar.baz': None, + 'mod_name_in_package': {'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp def test_api_distutils_extension_1(self): ffi = cffi.FFI() ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py @@ -0,0 +1,1 @@ +# Generated by pypy/tool/import_cffi.py diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c @@ -0,0 +1,13 @@ +#include + +extern int add1(int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add1(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + import sys, time + sys.stdout.write("preparing") + for i in range(3): + sys.stdout.flush() + time.sleep(0.02) + sys.stdout.write(".") + sys.stdout.write("\n") + + from _add1_cffi import ffi + + int(ord("A")) # check that built-ins are there + + @ffi.def_extern() + def add1(x, y): + sys.stdout.write("adding %d and %d\n" % (x, y)) + sys.stdout.flush() + return x + y +""") + +ffi.set_source("_add1_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c @@ -0,0 +1,14 @@ +#include + +extern int add1(int, int); +extern int add2(int, int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add2(100, -5, -20); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py @@ -0,0 +1,30 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add2(int, int, int); +""") + +ffi.embedding_init_code(r""" + import sys + sys.stdout.write("prepADD2\n") + + assert '_add2_cffi' in sys.modules + m = sys.modules['_add2_cffi'] + import _add2_cffi + ffi = _add2_cffi.ffi + + @ffi.def_extern() + def add2(x, y, z): + sys.stdout.write("adding %d and %d and %d\n" % (x, y, z)) + sys.stdout.flush() + return x + y + z +""") + +ffi.set_source("_add2_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py @@ -0,0 +1,25 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add3(int, int, int, int); +""") + +ffi.embedding_init_code(r""" + from _add3_cffi import ffi + import sys + + @ffi.def_extern() + def add3(x, y, z, t): + sys.stdout.write("adding %d, %d, %d, %d\n" % (x, y, z, t)) + sys.stdout.flush() + return x + y + z + t +""") + +ffi.set_source("_add3_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c @@ -0,0 +1,27 @@ +#include + +#ifdef _MSC_VER +# define DLLIMPORT __declspec(dllimport) +#else +# define DLLIMPORT extern +#endif + +DLLIMPORT int add_rec(int, int); +DLLIMPORT int (*my_callback)(int); + +static int some_callback(int x) +{ + printf("some_callback(%d)\n", x); + fflush(stdout); + return add_rec(x, 9); +} + +int main(void) +{ + int x, y; + my_callback = some_callback; + x = add_rec(40, 2); + y = add_rec(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int (*my_callback)(int); + int add_rec(int, int); +""") + +ffi.embedding_init_code(r""" + from _add_recursive_cffi import ffi, lib + import sys + print("preparing REC") + sys.stdout.flush() + + @ffi.def_extern() + def add_rec(x, y): + print("adding %d and %d" % (x, y)) + sys.stdout.flush() + return x + y + + x = lib.my_callback(400) + print('<<< %d >>>' % (x,)) +""") + +ffi.set_source("_add_recursive_cffi", """ +/* use CFFI_DLLEXPORT: on windows, it expands to __declspec(dllexport), + which is needed to export a variable from a dll */ +CFFI_DLLEXPORT int (*my_callback)(int); +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c @@ -0,0 +1,86 @@ +#include +#include +#include +#ifdef PTEST_USE_THREAD +# include +# include +static sem_t done; +#endif + + +extern int add1(int, int); + + +static double time_delta(struct timeval *stop, struct timeval *start) +{ + return (stop->tv_sec - start->tv_sec) + + 1e-6 * (stop->tv_usec - start->tv_usec); +} + +static double measure(void) +{ + long long i, iterations; + int result; + struct timeval start, stop; + double elapsed; + + add1(0, 0); /* prepare off-line */ + + i = 0; + iterations = 1000; + result = gettimeofday(&start, NULL); + assert(result == 0); + + while (1) { + for (; i < iterations; i++) { + add1(((int)i) & 0xaaaaaa, ((int)i) & 0x555555); + } + result = gettimeofday(&stop, NULL); + assert(result == 0); + + elapsed = time_delta(&stop, &start); + assert(elapsed >= 0.0); + if (elapsed > 2.5) + break; + iterations = iterations * 3 / 2; + } + + return elapsed / (double)iterations; +} + +static void *start_routine(void *arg) +{ + double t = measure(); + printf("time per call: %.3g\n", t); + +#ifdef PTEST_USE_THREAD + int status = sem_post(&done); + assert(status == 0); +#endif + + return arg; +} + + +int main(void) +{ +#ifndef PTEST_USE_THREAD + start_routine(0); +#else + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + add1(0, 0); /* this is the main thread */ + + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = sem_wait(&done); + assert(status == 0); + } +#endif + return 0; +} diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py @@ -0,0 +1,22 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + from _perf_cffi import ffi + + @ffi.def_extern() + def add1(x, y): + return x + y +""") + +ffi.set_source("_perf_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -0,0 +1,151 @@ +# Generated by pypy/tool/import_cffi.py +import py +import sys, os, re +import shutil, subprocess, time +from pypy.module.test_lib_pypy.cffi_tests.udir import udir +import cffi + +if hasattr(sys, 'gettotalrefcount'): + py.test.skip("tried hard and failed to have these tests run " + "in a debug-mode python") + + +local_dir = os.path.dirname(os.path.abspath(__file__)) +_link_error = '?' + +def check_lib_python_found(tmpdir): + global _link_error + if _link_error == '?': + ffi = cffi.FFI() + kwds = {} + ffi._apply_embedding_fix(kwds) + ffi.set_source("_test_lib_python_found", "", **kwds) + try: + ffi.compile(tmpdir=tmpdir, verbose=True) + except cffi.VerificationError as e: + _link_error = e + else: + _link_error = None + if _link_error: + py.test.skip(str(_link_error)) + + +class EmbeddingTests: + _compiled_modules = {} + + def setup_method(self, meth): + check_lib_python_found(str(udir.ensure('embedding', dir=1))) + self._path = udir.join('embedding', meth.__name__) + if sys.platform == "win32": + self._compiled_modules.clear() # workaround + + def get_path(self): + return str(self._path.ensure(dir=1)) + + def _run(self, args, env=None): + print(args) + popen = subprocess.Popen(args, env=env, cwd=self.get_path(), + stdout=subprocess.PIPE, + universal_newlines=True) + output = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("popen failed with exit code %r: %r" % ( + err, args)) + print(output.rstrip()) + return output + + def prepare_module(self, name): + if name not in self._compiled_modules: + path = self.get_path() + filename = '%s.py' % name + # NOTE: if you have an .egg globally installed with an older + # version of cffi, this will not work, because sys.path ends + # up with the .egg before the PYTHONPATH entries. I didn't + # find a solution to that: we could hack sys.path inside the + # script run here, but we can't hack it in the same way in + # execute(). + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + output = self._run([sys.executable, os.path.join(local_dir, filename)], + env=env) + match = re.compile(r"\bFILENAME: (.+)").search(output) + assert match + dynamic_lib_name = match.group(1) + if sys.platform == 'win32': + assert dynamic_lib_name.endswith('_cffi.dll') + else: + assert dynamic_lib_name.endswith('_cffi.so') + self._compiled_modules[name] = dynamic_lib_name + return self._compiled_modules[name] + + def compile(self, name, modules, opt=False, threads=False, defines={}): + path = self.get_path() + filename = '%s.c' % name + shutil.copy(os.path.join(local_dir, filename), path) + shutil.copy(os.path.join(local_dir, 'thread-test.h'), path) + import distutils.ccompiler + curdir = os.getcwd() + try: + os.chdir(self.get_path()) + c = distutils.ccompiler.new_compiler() + print('compiling %s with %r' % (name, modules)) + extra_preargs = [] + if sys.platform == 'win32': + libfiles = [] + for m in modules: + m = os.path.basename(m) + assert m.endswith('.dll') + libfiles.append('Release\\%s.lib' % m[:-4]) + modules = libfiles + elif threads: + extra_preargs.append('-pthread') + objects = c.compile([filename], macros=sorted(defines.items()), debug=True) + c.link_executable(objects + modules, name, extra_preargs=extra_preargs) + finally: + os.chdir(curdir) + + def execute(self, name): + path = self.get_path() + env = os.environ.copy() + env['PYTHONPATH'] = os.path.dirname(os.path.dirname(local_dir)) + libpath = env.get('LD_LIBRARY_PATH') + if libpath: + libpath = path + ':' + libpath + else: + libpath = path + env['LD_LIBRARY_PATH'] = libpath + print('running %r in %r' % (name, path)) + executable_name = name + if sys.platform == 'win32': + executable_name = os.path.join(path, executable_name + '.exe') + popen = subprocess.Popen([executable_name], cwd=path, env=env, + stdout=subprocess.PIPE, + universal_newlines=True) + result = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("%r failed with exit code %r" % (name, err)) + return result + + +class TestBasic(EmbeddingTests): + def test_basic(self): + add1_cffi = self.prepare_module('add1') + self.compile('add1-test', [add1_cffi]) + output = self.execute('add1-test') + assert output == ("preparing...\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") + + def test_two_modules(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('add2-test', [add1_cffi, add2_cffi]) + output = self.execute('add2-test') + assert output == ("preparing...\n" + "adding 40 and 2\n" + "prepADD2\n" + "adding 100 and -5 and -20\n" + "got: 42 75\n") diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py @@ -0,0 +1,53 @@ +# Generated by pypy/tool/import_cffi.py +import sys +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + +if sys.platform == 'win32': + import py + py.test.skip("written with POSIX functions") + + +class TestPerformance(EmbeddingTests): + def test_perf_single_threaded(self): + perf_cffi = self.prepare_module('perf') From pypy.commits at gmail.com Tue Jan 19 14:06:10 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 19 Jan 2016 11:06:10 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: what a stupid bug Message-ID: <569e8922.42cbc20a.63b1.ffff9b6e@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81866:e411e587da9b Date: 2016-01-19 20:04 +0100 http://bitbucket.org/pypy/pypy/changeset/e411e587da9b/ Log: what a stupid bug diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -82,7 +82,7 @@ return True if not attr.ever_mutated: attr.ever_mutated = True - self.see_write(w_value) + attr.see_write(w_value) cell = obj._mapdict_read_storage(attr.storageindex) if jit.we_are_jitted() and attr.class_is_known(): cls = attr.read_constant_cls() diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -558,6 +558,19 @@ obj.setdictvalue(space, "a", a1) assert obj.getdictvalue(space, "a") is a1 + +def test_bug_lost_write(monkeypatch): + none = Value(None) + true = Value(True) + false = Value(False) + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "tw", true) + obj.setdictvalue(space, "h", none) + obj.setdictvalue(space, "tw", false) + obj.setdictvalue(space, "tw", true) + assert obj.getdictvalue(space, "tw") is true + # ___________________________________________________________ # dict tests From pypy.commits at gmail.com Tue Jan 19 14:45:08 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Jan 2016 11:45:08 -0800 (PST) Subject: [pypy-commit] pypy exctrans: hg merge default Message-ID: <569e9244.d69c1c0a.e95bf.ffff9f1e@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81867:2f4d7c650765 Date: 2016-01-19 19:37 +0000 http://bitbucket.org/pypy/pypy/changeset/2f4d7c650765/ Log: hg merge default diff too long, truncating to 2000 out of 6108 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -39,5 +39,5 @@ # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html -cffi_imports: +cffi_imports: pypy-c PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -685,13 +685,17 @@ # the previous version of this code did. This should work for # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - so_ext = _get_c_extension_suffix() + # ".pypy-VERSION.so" instead. Note a further tweak for cffi's + # embedding mode: if EXT_SUFFIX is also defined, use that + # directly. + so_ext = get_config_var('EXT_SUFFIX') if so_ext is None: - so_ext = get_config_var('SO') # fall-back - # extensions in debug_mode are named 'module_d.pyd' under windows - if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back + # extensions in debug_mode are named 'module_d.pyd' under windows + if os.name == 'nt' and self.debug: + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib-python/2.7/test/capath/0e4015b9.0 b/lib-python/2.7/test/capath/0e4015b9.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/0e4015b9.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/capath/ce7b8643.0 b/lib-python/2.7/test/capath/ce7b8643.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/ce7b8643.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/https_svn_python_org_root.pem b/lib-python/2.7/test/https_svn_python_org_root.pem deleted file mode 100644 --- a/lib-python/2.7/test/https_svn_python_org_root.pem +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO -BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T -SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD ------END CERTIFICATE----- diff --git a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem --- a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem +++ b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG @@ -8,9 +8,9 @@ aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97 -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9 +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -57,7 +57,8 @@ SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") +REMOTE_HOST = "self-signed.pythontest.net" +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") @@ -244,7 +245,7 @@ self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): - with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) @@ -792,7 +793,7 @@ # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"): - ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) + ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) @@ -1013,7 +1014,7 @@ ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) @@ -1023,8 +1024,8 @@ # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) - # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + # but CAFILE_CACERT is a CA cert + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), @@ -1040,7 +1041,7 @@ (('emailAddress', 'support at cacert.org'),)), 'version': 3}]) - with open(SVN_PYTHON_ORG_ROOT_CERT) as f: + with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) @@ -1215,11 +1216,11 @@ class NetworkedTests(unittest.TestCase): def test_connect(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() @@ -1228,27 +1229,27 @@ s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) + self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443))) self.assertTrue(s.getpeercert()) finally: s.close() @@ -1256,14 +1257,14 @@ def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish @@ -1285,58 +1286,62 @@ def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: - self.skipTest("svn.python.org responded too quickly") + self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - rc = s.connect_ex(("svn.python.org", 444)) + rc = s.connect_ex((REMOTE_HOST, 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. - self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) + errors = ( + errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, + errno.EWOULDBLOCK, + ) + self.assertIn(rc, errors) finally: s.close() def test_connect_with_context(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), - server_hostname="svn.python.org") - s.connect(("svn.python.org", 443)) + server_hostname=REMOTE_HOST) + s.connect((REMOTE_HOST, 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # This should succeed because we specify the root cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(REMOTE_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1349,12 +1354,12 @@ # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1365,7 +1370,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1373,15 +1378,15 @@ s.close() def test_connect_cadata(self): - with open(CAFILE_CACERT) as f: + with open(REMOTE_ROOT_CERT) as f: pem = f.read().decode('ascii') der = ssl.PEM_cert_to_DER_cert(pem) - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1390,7 +1395,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1399,9 +1404,9 @@ # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) fd = ss.fileno() f = ss.makefile() f.close() @@ -1415,9 +1420,9 @@ self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = socket.socket(socket.AF_INET) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, @@ -1460,12 +1465,12 @@ if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) - _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) + _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): - remote = ("svn.python.org", 443) + remote = (REMOTE_HOST, 443) with support.transient_internet(remote[0]): with closing(ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s: @@ -1510,13 +1515,13 @@ def test_get_ca_certs_capath(self): # capath certs are loaded on request - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1527,12 +1532,12 @@ @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with closing(ctx1.wrap_socket(s)) as ss: - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 @@ -3026,7 +3031,7 @@ pass for filename in [ - CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, + CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.2 +Version: 1.5.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.5.0" +__version_info__ = (1, 5, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,8 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 #define _cffi_call_python \ - ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -206,7 +207,8 @@ /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org #endif diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -74,6 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -101,13 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed) + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -533,6 +542,31 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + if '__pypy__' in sys.builtin_module_names: + if hasattr(sys, 'prefix'): + import os + libdir = os.path.join(sys.prefix, 'bin') + dirs = kwds.setdefault('library_dirs', []) + if libdir not in dirs: + dirs.append(libdir) + pythonlib = "pypy-c" + else: + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + libraries = kwds.setdefault('libraries', []) + if pythonlib not in libraries: + libraries.append(pythonlib) + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " @@ -592,14 +626,23 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0): + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, + target=target, source_extension=source_extension, compiler_verbose=verbose, **kwds) def init_once(self, func, tag): @@ -626,6 +669,32 @@ self._init_once_cache[tag] = (True, result) return result + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -220,8 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._override = False - self._packed = False + self._options = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -281,16 +280,15 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): - prev_override = self._override - prev_packed = self._packed + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options try: - self._override = override - self._packed = packed + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} self._internal_parse(csource) finally: - self._override = prev_override - self._packed = prev_packed + self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -376,10 +374,13 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._inside_extern_python: - self._declare('extern_python ' + decl.name, tp) + if self._options['dllexport']: + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' else: - self._declare('function ' + decl.name, tp) + tag = 'function ' + self._declare(tag + decl.name, tp) def _parse_decl(self, decl): node = decl.type @@ -449,7 +450,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._override: + if not self._options['override']: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -728,7 +729,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._packed + tp.packed = self._options['packed'] if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, + target_extension, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +38,32 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _save_val(name): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + return config_vars.get(name, Ellipsis) + +def _restore_val(name, value): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + config_vars[name] = value + if value is Ellipsis: + del config_vars[name] + +def _win32_hack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): + MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ + MSVCCompiler._remove_visual_c_ref + MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file + +def _win32_unhack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + MSVCCompiler._remove_visual_c_ref = \ + MSVCCompiler._remove_visual_c_ref_CFFI_BAK + +def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -49,18 +76,29 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: + if sys.platform == 'win32' and embedding: + _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 + old_SO = _save_val('SO') + old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extension is not None: + _restore_val('SO', target_extension) + _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) + _restore_val('SO', old_SO) + _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) + if sys.platform == 'win32' and embedding: + _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -281,6 +282,29 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % + (self._string_literal(self.ffi._embedding),)) + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION + # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() @@ -365,17 +389,16 @@ prnt() # # the init function - base_module_name = self.module_name.split('.')[-1] prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') if self._num_externpy: prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python = ' + prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % VERSION) + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -394,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') @@ -1123,7 +1146,10 @@ assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) - def _generate_cpy_extern_python_decl(self, tp, name): + def _generate_cpy_dllexport_python_collecttype(self, tp, name): + self._generate_cpy_extern_python_collecttype(tp, name) + + def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1156,7 +1182,11 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + if dllexport: + tag = 'CFFI_DLLEXPORT' + else: + tag = 'static' + prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1174,6 +1204,9 @@ prnt() self._num_externpy += 1 + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: raise ffiplatform.VerificationError( @@ -1185,6 +1218,21 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + def _generate_cpy_dllexport_python_ctx(self, tp, name): + self._generate_cpy_extern_python_ctx(tp, name) + + def _string_literal(self, s): + def _char_repr(c): + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' + if c in '\\"?': return '\\' + c + if ' ' <= c < '\x7F': return c + if c == '\n': return '\\n' + return '\\%03o' % ord(c) + lines = [] + for line in s.splitlines(True): + lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) + return ' \\\n'.join(lines) + # ---------- # emitting the opcodes for individual types @@ -1311,12 +1359,15 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, **kwds): + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) @@ -1325,13 +1376,40 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + if target == '*': + target_module_name = module_name + target_extension = None # use default + else: + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + # split along the first '.' (not the last one, otherwise the + # preceeding dots are interpreted as splitting package names) + index = target.find('.') + if index < 0: + raise ValueError("target argument %r should be a file name " + "containing a '.'" % (target,)) + target_module_name = target[:index] + target_extension = target[index:] + # + ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose, + target_extension, + embedding=embedding) finally: os.chdir(cwd) return outputfilename diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -10,6 +10,15 @@ with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in recent versions of PyPy. +.. note:: + + The interface described in this page is kept for backward compatibility. + From PyPy 4.1, it is recommended to use instead CFFI's `native embedding + support,`__ which gives a simpler approach that works on CPython as well + as PyPy. + +.. __: http://cffi.readthedocs.org/en/latest/embedding.html + The resulting shared library exports very few functions, however they are enough to accomplish everything you need, provided you follow a few principles. The API is: diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -19,7 +19,9 @@ * Clone this new repo (i.e. the fork) to your local machine with the command ``hg clone ssh://hg at bitbucket.org/yourname/pypy``. It is a very slow - operation but only ever needs to be done once. If you already cloned + operation but only ever needs to be done once. See also + http://pypy.org/download.html#building-from-source . + If you already cloned ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, then you can reuse the same clone by editing the file ``.hg/hgrc`` in your clone to contain the line ``default = diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -67,8 +67,8 @@ **module** directory contains extension modules written in RPython * **rpython compiler** that resides in ``rpython/annotator`` and - ``rpython/rtyper`` directories. Consult :doc:`introduction to RPython ` for - further reading + ``rpython/rtyper`` directories. Consult `Getting Started with RPython`_ + for further reading * **JIT generator** lives in ``rpython/jit`` directory. optimizations live in ``rpython/jit/metainterp/optimizeopt``, the main JIT in @@ -80,3 +80,14 @@ The rest of directories serve specific niche goal and are unlikely a good entry point. + + +More documentation +------------------ + +* `Getting Started Developing With PyPy`_ + +* `Getting Started with RPython`_ + +.. _`Getting Started Developing With PyPy`: getting-started-dev.html +.. _`Getting started with RPython`: http://rpython.readthedocs.org/en/latest/getting-started.html diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -110,3 +110,16 @@ short-running Python callbacks. (CFFI on CPython has a hack to achieve the same result.) This can also be seen as a bug fix: previously, thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch: fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -84,13 +84,6 @@ from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype - w_pathsetter = space.appexec([], """(): - def f(path): - import sys - sys.path[:] = path - return f - """) - @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -109,7 +102,10 @@ " not found in '%s' or in any parent directory" % home1) return rffi.cast(rffi.INT, 1) space.startup() - space.call_function(w_pathsetter, w_path) + space.appexec([w_path], """(path): + import sys + sys.path[:] = path + """) # import site try: space.setattr(space.getbuiltinmodule('sys'), @@ -149,6 +145,9 @@ return os_thread.setup_threads(space) os_thread.bootstrapper.acquire(space, None, None) + # XXX this doesn't really work. Don't use os.fork(), and + # if your embedder program uses fork(), don't use any PyPy + # code in the fork rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -9,8 +9,8 @@ class Code(W_Root): """A code is a compiled version of some source code. Abstract base class.""" - _immutable_ = True hidden_applevel = False + _immutable_fields_ = ['co_name', 'fast_natural_arity', 'hidden_applevel'] # n >= 0 : arity # FLATPYCALL = 0x100 diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -56,11 +56,13 @@ class PyCode(eval.Code): "CPython-style code objects." - _immutable_ = True - _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", - "_args_as_cellvars[*]"] - + _immutable_fields_ = ["_signature", "co_argcount", "co_cellvars[*]", + "co_code", "co_consts_w[*]", "co_filename", + "co_firstlineno", "co_flags", "co_freevars[*]", + "co_lnotab", "co_names_w[*]", "co_nlocals", + "co_stacksize", "co_varnames[*]", + "_args_as_cellvars[*]", "w_globals?"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -84,6 +86,10 @@ self.co_name = name self.co_firstlineno = firstlineno self.co_lnotab = lnotab + # store the first globals object that the code object is run in in + # here. if a frame is run in that globals object, it does not need to + # store it at all + self.w_globals = None self.hidden_applevel = hidden_applevel self.magic = magic self._signature = cpython_code_signature(self) @@ -91,6 +97,14 @@ self._init_ready() self.new_code_hook() + def frame_stores_global(self, w_globals): + if self.w_globals is None: + self.w_globals = w_globals + return False + if self.w_globals is w_globals: + return False + return True + def new_code_hook(self): code_hook = self.space.fromcache(CodeHookCache)._code_hook if code_hook is not None: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -36,6 +36,7 @@ def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno + self.w_globals = pycode.w_globals class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -67,7 +68,6 @@ escaped = False # see mark_as_escaped() debugdata = None - w_globals = None pycode = None # code object executed by that frame locals_cells_stack_w = None # the list of all locals, cells and the valuestack valuestackdepth = 0 # number of items on valuestack @@ -90,8 +90,9 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.space = space - self.w_globals = w_globals self.pycode = code + if code.frame_stores_global(w_globals): + self.getorcreatedebug().w_globals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize @@ -116,6 +117,12 @@ self.debugdata = FrameDebugData(self.pycode) return self.debugdata + def get_w_globals(self): + debugdata = self.getdebug() + if debugdata is not None: + return debugdata.w_globals + return jit.promote(self.pycode).w_globals + def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -201,8 +208,9 @@ if flags & pycode.CO_NEWLOCALS: self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: - assert self.w_globals is not None - self.getorcreatedebug().w_locals = self.w_globals + w_globals = self.get_w_globals() + assert w_globals is not None + self.getorcreatedebug().w_locals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -449,7 +457,7 @@ w_blockstack, w_exc_value, # last_exception w_tb, # - self.w_globals, + self.get_w_globals(), w(self.last_instr), w(self.frame_finished_execution), w(f_lineno), @@ -658,6 +666,11 @@ def fget_getdictscope(self, space): return self.getdictscope() + def fget_w_globals(self, space): + # bit silly, but GetSetProperty passes a space + return self.get_w_globals() + + ### line numbers ### def fget_f_lineno(self, space): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,7 +837,7 @@ w_bases = self.popvalue() w_name = self.popvalue() w_metaclass = find_metaclass(self.space, w_bases, - w_methodsdict, self.w_globals, + w_methodsdict, self.get_w_globals(), self.space.wrap(self.get_builtin())) w_newclass = self.space.call_function(w_metaclass, w_name, w_bases, w_methodsdict) @@ -881,14 +881,14 @@ def STORE_GLOBAL(self, nameindex, next_instr): varname = self.getname_u(nameindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_globals, varname, w_newvalue) + self.space.setitem_str(self.get_w_globals(), varname, w_newvalue) def DELETE_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - self.space.delitem(self.w_globals, w_varname) + self.space.delitem(self.get_w_globals(), w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.getorcreatedebug().w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.get_w_globals(): varname = self.getname_u(nameindex) w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -898,7 +898,7 @@ self.LOAD_GLOBAL(nameindex, next_instr) # fall-back def _load_global(self, varname): - w_value = self.space.finditem_str(self.w_globals, varname) + w_value = self.space.finditem_str(self.get_w_globals(), varname) if w_value is None: # not in the globals, now look in the built-ins w_value = self.get_builtin().getdictvalue(self.space, varname) @@ -1029,7 +1029,7 @@ if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) - w_globals = self.w_globals + w_globals = self.get_w_globals() if w_flag is None: w_obj = space.call_function(w_import, w_modulename, w_globals, w_locals, w_fromlist) @@ -1237,7 +1237,7 @@ w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments) self.pushvalue(self.space.wrap(fn)) @@ -1249,7 +1249,7 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -34,6 +34,7 @@ import sys f = sys._getframe() assert f.f_globals is globals() + raises(TypeError, "f.f_globals = globals()") def test_f_builtins(self): import sys, __builtin__ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -772,7 +772,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), + f_globals = GetSetProperty(PyFrame.fget_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -93,7 +93,7 @@ if space.is_none(w_locals): w_locals = w_globals else: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): diff --git a/pypy/module/__builtin__/interp_inspect.py b/pypy/module/__builtin__/interp_inspect.py --- a/pypy/module/__builtin__/interp_inspect.py +++ b/pypy/module/__builtin__/interp_inspect.py @@ -2,7 +2,7 @@ def globals(space): "Return the dictionary containing the current scope's global variables." ec = space.getexecutioncontext() - return ec.gettopframe_nohidden().w_globals + return ec.gettopframe_nohidden().get_w_globals() def locals(space): """Return a dictionary containing the current scope's local variables. diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1084,7 +1084,7 @@ def is_strdict(space, w_class): from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) + return space.wrap(isinstance(w_d.get_strategy(), BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -93,7 +93,7 @@ Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = w_obj.strategy.__class__.__name__ + name = w_obj.get_strategy().__class__.__name__ elif isinstance(w_obj, W_ListObject): name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,8 +1,9 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi +from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rtyper.lltypesystem import rffi -VERSION = "1.4.2" +VERSION = "1.5.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -65,6 +66,10 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def startup(self, space): + from pypy.module._cffi_backend import embedding + embedding.glob.space = space + def get_dict_rtld_constants(): found = {} @@ -78,3 +83,11 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value + + +# write this entrypoint() here, to make sure it is registered early enough + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') +def pypy_init_embedded_cffi_module(version, init_struct): + from pypy.module._cffi_backend import embedding + return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -2,24 +2,25 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module +from pypy.module import _cffi_backend from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject from pypy.module._cffi_backend.lib_obj import W_LibObject VERSION_MIN = 0x2601 -VERSION_MAX = 0x26FF +VERSION_MAX = 0x27FF VERSION_EXPORT = 0x0A03 -initfunctype = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) +INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) def load_cffi1_module(space, name, path, initptr): # This is called from pypy.module.cpyext.api.load_extension_module() from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python - initfunc = rffi.cast(initfunctype, initptr) + initfunc = rffi.cast(INITFUNCPTR, initptr) with lltype.scoped_alloc(rffi.VOIDPP.TO, 16, zero=True) as p: p[0] = rffi.cast(rffi.VOIDP, VERSION_EXPORT) p[1] = rffi.cast(rffi.VOIDP, get_ll_cffi_call_python()) @@ -27,8 +28,10 @@ version = rffi.cast(lltype.Signed, p[0]) if not (VERSION_MIN <= version <= VERSION_MAX): raise oefmt(space.w_ImportError, - "cffi extension module '%s' has unknown version %s", - name, hex(version)) + "cffi extension module '%s' uses an unknown version tag %s. " + "This module might need a more recent version of PyPy. " + "The current PyPy provides CFFI %s.", + name, hex(version), _cffi_backend.VERSION) src_ctx = rffi.cast(parse_c_type.PCTX, p[1]) ffi = W_FFIObject(space, src_ctx) @@ -38,7 +41,8 @@ w_name = space.wrap(name) module = Module(space, w_name) - module.setdictvalue(space, '__file__', space.wrap(path)) + if path is not None: + module.setdictvalue(space, '__file__', space.wrap(path)) module.setdictvalue(space, 'ffi', space.wrap(ffi)) module.setdictvalue(space, 'lib', space.wrap(lib)) w_modules_dict = space.sys.get('modules') diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cffi_backend/embedding.py @@ -0,0 +1,146 @@ +import os +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import OperationError, oefmt + +# ____________________________________________________________ + + +EMBED_VERSION_MIN = 0xB011 +EMBED_VERSION_MAX = 0xB0FF + +STDERR = 2 +INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', + ('name', rffi.CCHARP), + ('func', rffi.VOIDP), + ('code', rffi.CCHARP))) + +def load_embedded_cffi_module(space, version, init_struct): + from pypy.module._cffi_backend.cffi1_module import load_cffi1_module + declare_c_function() # translation-time hint only: + # declare _cffi_carefully_make_gil() + # + version = rffi.cast(lltype.Signed, version) + if not (EMBED_VERSION_MIN <= version <= EMBED_VERSION_MAX): + raise oefmt(space.w_ImportError, + "cffi embedded module has got unknown version tag %s", + hex(version)) + # + if space.config.objspace.usemodules.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + # + name = rffi.charp2str(init_struct.name) + load_cffi1_module(space, name, None, init_struct.func) + code = rffi.charp2str(init_struct.code) + compiler = space.createcompiler() + pycode = compiler.compile(code, "" % name, 'exec', 0) + w_globals = space.newdict(module=True) + space.setitem_str(w_globals, "__builtins__", space.wrap(space.builtin)) + pycode.exec_code(space, w_globals, w_globals) + + +class Global: + pass +glob = Global() + +def pypy_init_embedded_cffi_module(version, init_struct): + # called from __init__.py + name = "?" + try: + init_struct = rffi.cast(INITSTRUCTPTR, init_struct) + name = rffi.charp2str(init_struct.name) + # + space = glob.space + must_leave = False + try: + must_leave = space.threadlocals.try_enter_thread(space) + load_embedded_cffi_module(space, version, init_struct) + res = 0 + except OperationError, operr: + operr.write_unraisable(space, "initialization of '%s'" % name, + with_traceback=True) + space.appexec([], r"""(): + import sys + sys.stderr.write('pypy version: %s.%s.%s\n' % + sys.pypy_version_info[:3]) + sys.stderr.write('sys.path: %r\n' % (sys.path,)) + """) + res = -1 + if must_leave: + space.threadlocals.leave_thread(space) + except Exception, e: + # oups! last-level attempt to recover. + try: + os.write(STDERR, "From initialization of '") + os.write(STDERR, name) + os.write(STDERR, "':\n") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + res = -1 + return rffi.cast(rffi.INT, res) + +# ____________________________________________________________ + + +eci = ExternalCompilationInfo(separate_module_sources=[ +r""" +/* XXX Windows missing */ +#include +#include +#include + +RPY_EXPORTED void rpython_startup_code(void); +RPY_EXPORTED int pypy_setup_home(char *, int); + +static unsigned char _cffi_ready = 0; +static const char *volatile _cffi_module_name; + +static void _cffi_init_error(const char *msg, const char *extra) +{ + fprintf(stderr, + "\nPyPy initialization failure when loading module '%s':\n%s%s\n", + _cffi_module_name, msg, extra); +} + +static void _cffi_init(void) +{ + Dl_info info; + char *home; + + rpython_startup_code(); + RPyGilAllocate(); + + if (dladdr(&_cffi_init, &info) == 0) { + _cffi_init_error("dladdr() failed: ", dlerror()); + return; + } + home = realpath(info.dli_fname, NULL); + if (pypy_setup_home(home, 1) != 0) { + _cffi_init_error("pypy_setup_home() failed", ""); + return; + } + _cffi_ready = 1; +} + +RPY_EXPORTED +int pypy_carefully_make_gil(const char *name) +{ + /* For CFFI: this initializes the GIL and loads the home path. + It can be called completely concurrently from unrelated threads. + It assumes that we don't hold the GIL before (if it exists), and we + don't hold it afterwards. + */ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + _cffi_module_name = name; /* not really thread-safe, but better than + nothing */ + pthread_once(&once_control, _cffi_init); + return (int)_cffi_ready - 1; +} +"""]) + +declare_c_function = rffi.llexternal_use_eci(eci) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.5.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff --git a/pypy/module/_warnings/interp_warnings.py b/pypy/module/_warnings/interp_warnings.py --- a/pypy/module/_warnings/interp_warnings.py +++ b/pypy/module/_warnings/interp_warnings.py @@ -75,7 +75,7 @@ frame = ec.getnextframe_nohidden(frame) stacklevel -= 1 if frame: - w_globals = frame.w_globals + w_globals = frame.get_w_globals() lineno = frame.get_last_lineno() else: w_globals = space.sys.w_dict diff --git a/pypy/module/cpyext/Doc_stubgen_enable.patch b/pypy/module/cpyext/Doc_stubgen_enable.patch deleted file mode 100644 --- a/pypy/module/cpyext/Doc_stubgen_enable.patch +++ /dev/null @@ -1,27 +0,0 @@ -Index: Doc/tools/sphinx/ext/refcounting.py -=================================================================== ---- Doc/tools/sphinx/ext/refcounting.py (Revision 79453) -+++ Doc/tools/sphinx/ext/refcounting.py (Arbeitskopie) -@@ -91,6 +91,7 @@ - if app.config.refcount_file: - refcounts = Refcounts.fromfile( - path.join(app.srcdir, app.config.refcount_file)) -+ app._refcounts = refcounts - app.connect('doctree-read', refcounts.add_refcount_annotations) - - -Index: Doc/conf.py -=================================================================== ---- Doc/conf.py (Revision 79421) -+++ Doc/conf.py (Arbeitskopie) -@@ -13,8 +13,8 @@ - # General configuration - # --------------------- - --extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage', -- 'sphinx.ext.doctest', 'pyspecific'] -+extensions = ['pypy.module.cpyext.stubgen', 'sphinx.ext.refcounting', 'sphinx.ext.coverage', -+ 'sphinx.ext.doctest', 'pyspecific', ] - templates_path = ['tools/sphinxext'] - - # General substitutions. diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -30,7 +30,7 @@ currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtins = space.getitem(w_globals, space.wrap('__builtins__')) if not space.isinstance_w(w_builtins, space.w_dict): w_builtins = w_builtins.getdict(space) @@ -54,7 +54,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.w_globals) + return borrow_from(None, caller.get_w_globals()) @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -34,7 +34,7 @@ frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) - py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_globals = make_ref(space, frame.get_w_globals()) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @cpython_api([PyObject], lltype.Void, external=False) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -20,7 +20,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals diff --git a/pypy/module/cpyext/patches/Doc_stubgen_enable.patch b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch @@ -0,0 +1,27 @@ +Index: Doc/tools/sphinx/ext/refcounting.py +=================================================================== +--- Doc/tools/sphinx/ext/refcounting.py (Revision 79453) ++++ Doc/tools/sphinx/ext/refcounting.py (Arbeitskopie) +@@ -91,6 +91,7 @@ + if app.config.refcount_file: + refcounts = Refcounts.fromfile( + path.join(app.srcdir, app.config.refcount_file)) ++ app._refcounts = refcounts + app.connect('doctree-read', refcounts.add_refcount_annotations) + + +Index: Doc/conf.py +=================================================================== +--- Doc/conf.py (Revision 79421) ++++ Doc/conf.py (Arbeitskopie) +@@ -13,8 +13,8 @@ + # General configuration + # --------------------- + +-extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage', +- 'sphinx.ext.doctest', 'pyspecific'] ++extensions = ['pypy.module.cpyext.stubgen', 'sphinx.ext.refcounting', 'sphinx.ext.coverage', ++ 'sphinx.ext.doctest', 'pyspecific', ] + templates_path = ['tools/sphinxext'] + + # General substitutions. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -372,7 +372,7 @@ def arg_int_w(self, w_obj, minimum, errormsg): space = self.space try: - result = space.int_w(w_obj) + result = space.int_w(space.int(w_obj)) # CPython allows floats as parameters except OperationError, e: if e.async(space): raise diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -225,6 +225,12 @@ assert it.next() == x raises(StopIteration, it.next) + # CPython implementation allows floats + it = itertools.islice([1, 2, 3, 4, 5], 0.0, 3.0, 2.0) + for x in [1, 3]: + assert it.next() == x + raises(StopIteration, it.next) + it = itertools.islice([1, 2, 3], 0, None) for x in [1, 2, 3]: assert it.next() == x diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -526,7 +526,7 @@ log = self.run(f) loop, = log.loops_by_filename(self.filepath) call_ops = log.opnames(loop.ops_by_id('call')) - assert call_ops == ['force_token'] # it does not follow inlining + assert call_ops == ['guard_not_invalidated', 'force_token'] # it does not follow inlining # add_ops = log.opnames(loop.ops_by_id('add')) assert add_ops == ['int_add'] @@ -534,9 +534,10 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'int_lt', 'guard_true', + 'guard_not_invalidated', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,8 +72,6 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] @@ -200,6 +198,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ + guard_not_invalidated? i14 = force_token() i16 = force_token() """) @@ -222,7 +221,7 @@ loop, = log.loops_by_id('call') ops = log.opnames(loop.ops_by_id('call')) guards = [ops for ops in ops if ops.startswith('guard')] - assert guards == ["guard_no_overflow"] + assert guards == ["guard_not_invalidated", "guard_no_overflow"] def test_kwargs(self): # this is not a very precise test, could be improved @@ -281,6 +280,7 @@ assert log.result == 13000 loop0, = log.loops_by_id('g1') assert loop0.match_by_id('g1', """ + guard_not_invalidated? i20 = force_token() i22 = int_add_ovf(i8, 3) guard_no_overflow(descr=...) @@ -438,9 +438,6 @@ i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) - guard_value(p7, ConstPtr(ptr25), descr=...) - p26 = getfield_gc_r(p7, descr=) - guard_value(p26, ConstPtr(ptr27), descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) @@ -472,6 +469,7 @@ i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,9 +16,5 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p12 = getfield_gc_r(p10, descr=) - guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc_r(ConstPtr(p17), descr=) - guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -124,7 +124,7 @@ setfield_gc(ConstPtr(ptr39), i59, descr=...) i62 = int_lt(i61, 0) guard_false(i62, descr=...) - jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, p100, descr=...) + jump(..., descr=...) """) def test_mutate_class(self): @@ -183,7 +183,7 @@ setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(ptr42), descr=...) setfield_gc(ConstPtr(ptr69), p77, descr=...) - jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, p100, descr=...) + jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -145,9 +145,9 @@ i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) - i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) + i18 = force_token() i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) @@ -178,9 +178,9 @@ i16 = int_ge(i11, i12) guard_false(i16, descr=...) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) @@ -211,9 +211,9 @@ i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) - i21 = force_token() From pypy.commits at gmail.com Tue Jan 19 15:59:09 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Jan 2016 12:59:09 -0800 (PST) Subject: [pypy-commit] pypy exctrans: simplify code Message-ID: <569ea39d.6adec20a.1460b.ffffd1ec@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81868:f08b79d813f5 Date: 2016-01-19 20:00 +0000 http://bitbucket.org/pypy/pypy/changeset/f08b79d813f5/ Log: simplify code diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -903,9 +903,9 @@ funcgen.implementation_end() def new_funcnode(db, T, obj, forcename=None): - sandbox = db.sandbox and need_sandboxing(obj) - if sandbox: - if getattr(obj, 'external', None) is not None: + if db.sandbox: + if (getattr(obj, 'external', None) is not None and + not obj._safe_not_sandboxed): from rpython.translator.sandbox import rsandbox obj.__dict__['graph'] = rsandbox.get_sandbox_stub( obj, db.translator.rtyper) @@ -918,12 +918,6 @@ name = _select_name(db, obj) return FuncNode(db, T, obj, name) -def need_sandboxing(fnobj): - if hasattr(fnobj, '_safe_not_sandboxed'): - return not fnobj._safe_not_sandboxed - else: - return "if_external" - def select_function_code_generators(fnobj, db, functionname): if hasattr(fnobj, 'graph'): exception_policy = getattr(fnobj, 'exception_policy', None) From pypy.commits at gmail.com Tue Jan 19 15:59:11 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 19 Jan 2016 12:59:11 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Simplify: split off ExternalFuncNode class from FuncNode Message-ID: <569ea39f.552f1c0a.1091.ffffb41d@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81869:ced39321ee69 Date: 2016-01-19 20:58 +0000 http://bitbucket.org/pypy/pypy/changeset/ced39321ee69/ Log: Simplify: split off ExternalFuncNode class from FuncNode diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -9,9 +9,9 @@ from rpython.rtyper.lltypesystem import llgroup from rpython.tool.sourcetools import valid_identifier from rpython.translator.c.primitive import PrimitiveName, PrimitiveType -from rpython.translator.c.node import StructDefNode, ArrayDefNode -from rpython.translator.c.node import FixedSizeArrayDefNode, BareBoneArrayDefNode -from rpython.translator.c.node import ContainerNodeFactory, ExtTypeOpaqueDefNode +from rpython.translator.c.node import ( + StructDefNode, ArrayDefNode, FixedSizeArrayDefNode, BareBoneArrayDefNode, + ContainerNodeFactory, ExtTypeOpaqueDefNode, FuncNode) from rpython.translator.c.support import cdecl, CNameManager from rpython.translator.c.support import log, barebonearray from rpython.translator.c.extfunc import do_the_getting @@ -383,7 +383,7 @@ def all_graphs(self): graphs = [] for node in self.containerlist: - if node.nodekind == 'func': + if isinstance(node, FuncNode): for graph in node.graphs_to_patch(): graphs.append(graph) return graphs diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -807,23 +807,17 @@ # ____________________________________________________________ -class FuncNode(ContainerNode): +class FuncNodeBase(ContainerNode): nodekind = 'func' eci_name = 'compilation_info' # there not so many node of this kind, slots should not # be necessary - def __init__(self, db, T, obj, ptrname): Node.__init__(self, db) self.globalcontainer = True self.T = T self.obj = obj self.name = ptrname - self.funcgen = select_function_code_generators(obj, db, ptrname) - if self.funcgen: - argnames = self.funcgen.argnames() - self.implementationtypename = db.gettype(T, argnames=argnames) - self._funccodegen_owner = self.funcgen self.typename = db.gettype(T) #, who_asks=self) def getptrname(self): @@ -832,30 +826,32 @@ def basename(self): return self.obj._name + +class FuncNode(FuncNodeBase): + def __init__(self, db, T, obj, ptrname): + FuncNodeBase.__init__(self, db, T, obj, ptrname) + exception_policy = getattr(obj, 'exception_policy', None) + self.funcgen = make_funcgen(obj.graph, db, exception_policy, ptrname) + argnames = self.funcgen.argnames() + self.implementationtypename = db.gettype(T, argnames=argnames) + self._funccodegen_owner = self.funcgen + def enum_dependencies(self): - if self.funcgen is None: - return [] return self.funcgen.allconstantvalues() def forward_declaration(self): callable = getattr(self.obj, '_callable', None) is_exported = getattr(callable, 'exported_symbol', False) - if self.funcgen: - yield '%s;' % ( - forward_cdecl(self.implementationtypename, - self.name, self.db.standalone, is_exported=is_exported)) + yield '%s;' % ( + forward_cdecl(self.implementationtypename, + self.name, self.db.standalone, is_exported=is_exported)) + + def graphs_to_patch(self): + for i in self.funcgen.graphs_to_patch(): + yield i def implementation(self): - if self.funcgen: - for s in self.funcgen_implementation(self.funcgen): - yield s - - def graphs_to_patch(self): - if self.funcgen: - for i in self.funcgen.graphs_to_patch(): - yield i - - def funcgen_implementation(self, funcgen): + funcgen = self.funcgen funcgen.implementation_begin() # recompute implementationtypename as the argnames may have changed argnames = funcgen.argnames() @@ -902,6 +898,20 @@ del bodyiter funcgen.implementation_end() +class ExternalFuncNode(FuncNodeBase): + def __init__(self, db, T, obj, ptrname): + FuncNodeBase.__init__(self, db, T, obj, ptrname) + self._funccodegen_owner = None + + def enum_dependencies(self): + return [] + + def forward_declaration(self): + return [] + + def implementation(self): + return [] + def new_funcnode(db, T, obj, forcename=None): if db.sandbox: if (getattr(obj, 'external', None) is not None and @@ -911,26 +921,22 @@ obj, db.translator.rtyper) obj.__dict__.pop('_safe_not_sandboxed', None) obj.__dict__.pop('external', None) - if forcename: name = forcename else: name = _select_name(db, obj) - return FuncNode(db, T, obj, name) + if hasattr(obj, 'graph'): + return FuncNode(db, T, obj, name) + elif getattr(obj, 'external', None) is not None: + assert obj.external == 'C' + if db.sandbox: + assert obj._safe_not_sandboxed + return ExternalFuncNode(db, T, obj, name) + elif hasattr(obj._callable, "c_name"): + return ExternalFuncNode(db, T, obj, name) # this case should only be used for entrypoints + else: + raise ValueError("don't know how to generate code for %r" % (obj,)) -def select_function_code_generators(fnobj, db, functionname): - if hasattr(fnobj, 'graph'): - exception_policy = getattr(fnobj, 'exception_policy', None) - return make_funcgen(fnobj.graph, db, exception_policy, functionname) - elif getattr(fnobj, 'external', None) is not None: - assert fnobj.external == 'C' - if db.sandbox: - assert fnobj._safe_not_sandboxed - return None - elif hasattr(fnobj._callable, "c_name"): - return None # this case should only be used for entrypoints - else: - raise ValueError("don't know how to generate code for %r" % (fnobj,)) def _select_name(db, obj): try: From pypy.commits at gmail.com Wed Jan 20 03:23:43 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 00:23:43 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: adding -march=zEC12 to the platform's cflags (linux) Message-ID: <569f440f.cb571c0a.a54d.495f@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81870:0d8edc9df396 Date: 2016-01-20 09:22 +0100 http://bitbucket.org/pypy/pypy/changeset/0d8edc9df396/ Log: adding -march=zEC12 to the platform's cflags (linux) diff --git a/rpython/translator/platform/distutils_platform.py b/rpython/translator/platform/distutils_platform.py --- a/rpython/translator/platform/distutils_platform.py +++ b/rpython/translator/platform/distutils_platform.py @@ -1,5 +1,6 @@ import py, os, sys +import platform from rpython.translator.platform import Platform, log, CompilationError from rpython.translator.tool import stdoutcapture diff --git a/rpython/translator/platform/linux.py b/rpython/translator/platform/linux.py --- a/rpython/translator/platform/linux.py +++ b/rpython/translator/platform/linux.py @@ -21,6 +21,10 @@ so_ext = 'so' so_prefixes = ('lib', '') + if platform.machine() == 's390x': + # force the right target arch for s390x + cflags = ('-march=zEC12',) + cflags + def _args_for_shared(self, args): return ['-shared'] + args From pypy.commits at gmail.com Wed Jan 20 03:23:45 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 00:23:45 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed unused import added earlier Message-ID: <569f4411.42cbc20a.63b1.4cc7@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81871:68a03e4147e6 Date: 2016-01-20 09:22 +0100 http://bitbucket.org/pypy/pypy/changeset/68a03e4147e6/ Log: removed unused import added earlier diff --git a/rpython/translator/platform/distutils_platform.py b/rpython/translator/platform/distutils_platform.py --- a/rpython/translator/platform/distutils_platform.py +++ b/rpython/translator/platform/distutils_platform.py @@ -1,6 +1,5 @@ import py, os, sys -import platform from rpython.translator.platform import Platform, log, CompilationError from rpython.translator.tool import stdoutcapture From pypy.commits at gmail.com Wed Jan 20 03:38:54 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 00:38:54 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed print statement and exchanged it with llop.debug_print (thx mjacob) Message-ID: <569f479e.a867c20a.88a60.371d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81872:3b1bb2d5dd4d Date: 2016-01-20 09:38 +0100 http://bitbucket.org/pypy/pypy/changeset/3b1bb2d5dd4d/ Log: removed print statement and exchanged it with llop.debug_print (thx mjacob) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1461,8 +1461,10 @@ pmc.overwrite() def notimplemented_op(asm, op, arglocs, regalloc): - print "[ZARCH/asm] %s not implemented" % op.getopname() - raise NotImplementedError(op) + if we_are_translated(): + msg = "[ZARCH/asm] %s not implemented\n" % op.getopname() + llop.debug_print(lltype.Void, msg) + raise NotImplementedError(msg) asm_operations = [notimplemented_op] * (rop._LAST + 1) asm_extra_operations = {} diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1215,29 +1215,39 @@ prepare_oplist = [notimplemented] * (rop._LAST + 1) -implemented_count = 0 -total_count = 0 -missing = [] -for key, value in rop.__dict__.items(): - key = key.lower() - if key.startswith('_'): - continue - total_count += 1 - methname = 'prepare_%s' % key - if hasattr(Regalloc, methname): - func = getattr(Regalloc, methname).im_func - prepare_oplist[value] = func - implemented_count += 1 - else: - missing.append(methname) +if not we_are_translated(): + implemented_count = 0 + total_count = 0 + missing = [] + for key, value in rop.__dict__.items(): + key = key.lower() + if key.startswith('_'): + continue + total_count += 1 + methname = 'prepare_%s' % key + if hasattr(Regalloc, methname): + func = getattr(Regalloc, methname).im_func + prepare_oplist[value] = func + implemented_count += 1 + else: + if not methname.startswith('prepare_vec') and \ + not methname.startswith('prepare_get') and \ + not methname.startswith('prepare_raw') and \ + not methname.startswith('prepare_unicodesetitem') and \ + not methname.startswith('prepare_unicodegetitem') and \ + not methname.startswith('prepare_strgetitem') and \ + not methname.startswith('prepare_strsetitem') and \ + not methname.startswith('prepare_call_loopinvariant') and \ + not methname.startswith('prepare_call_pure') and \ + not methname.startswith('prepare_new') and \ + not methname.startswith('prepare_set'): + missing.append(methname) + else: + implemented_count += 1 -if __name__ == '__main__': - for m in missing: - print(" " * 4 + m) - print - print("regalloc implements %d of %d = %.2f%% of all resops" % \ - (implemented_count, total_count, (100.0 * implemented_count / total_count))) - -del implemented_count -del total_count -del missing + if __name__ == '__main__': + for m in missing: + print(" " * 4 + m) + print + print("regalloc implements %d of %d = %.2f%% of all resops" % \ + (implemented_count, total_count, (100.0 * implemented_count / total_count))) From pypy.commits at gmail.com Wed Jan 20 04:07:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 01:07:15 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed tests that fail after applying the scale in gc_load_indexed_r (llsupport/gc.py) Message-ID: <569f4e43.2815c20a.6c9f0.5df3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81873:45cca792fa2e Date: 2016-01-20 10:06 +0100 http://bitbucket.org/pypy/pypy/changeset/45cca792fa2e/ Log: fixed tests that fail after applying the scale in gc_load_indexed_r (llsupport/gc.py) diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py @@ -127,7 +127,7 @@ i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) """, """ [] - p1 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 0, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + p1 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, 1, 0, %(ptr_array_descr.itemsize)s) i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) """) assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 1 @@ -140,10 +140,10 @@ i2 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) """, """ [] - p1 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 0, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + p1 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, 1, 0, %(ptr_array_descr.itemsize)s) i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) i1 = gc_load_i(ConstPtr(notpinned_obj_gcref), 0, -%(notpinned_obj_my_int_descr.field_size)s) - p2 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 1, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + p2 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), %(1 * ptr_array_descr.itemsize + 1)s, 1, 0, %(ptr_array_descr.itemsize)s) i2 = gc_load_i(p2, 0, -%(pinned_obj_my_int_descr.field_size)s) """) assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 2 From pypy.commits at gmail.com Wed Jan 20 04:08:59 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 01:08:59 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged default Message-ID: <569f4eab.42cbc20a.63b1.5c0e@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81874:fc1bb01e3b26 Date: 2016-01-20 10:08 +0100 http://bitbucket.org/pypy/pypy/changeset/fc1bb01e3b26/ Log: merged default diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -67,8 +67,8 @@ **module** directory contains extension modules written in RPython * **rpython compiler** that resides in ``rpython/annotator`` and - ``rpython/rtyper`` directories. Consult :doc:`introduction to RPython ` for - further reading + ``rpython/rtyper`` directories. Consult `Getting Started with RPython`_ + for further reading * **JIT generator** lives in ``rpython/jit`` directory. optimizations live in ``rpython/jit/metainterp/optimizeopt``, the main JIT in @@ -80,3 +80,14 @@ The rest of directories serve specific niche goal and are unlikely a good entry point. + + +More documentation +------------------ + +* `Getting Started Developing With PyPy`_ + +* `Getting Started with RPython`_ + +.. _`Getting Started Developing With PyPy`: getting-started-dev.html +.. _`Getting started with RPython`: http://rpython.readthedocs.org/en/latest/getting-started.html diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -120,7 +120,7 @@ Updated to CFFI 1.5, which supports a new way to do embedding. Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. -.. branch fix-cpython-ssl-tests-2.7 +.. branch: fix-cpython-ssl-tests-2.7 Fix SSL tests by importing cpython's patch diff --git a/pypy/module/cpyext/Doc_stubgen_enable.patch b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch rename from pypy/module/cpyext/Doc_stubgen_enable.patch rename to pypy/module/cpyext/patches/Doc_stubgen_enable.patch diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -30,10 +30,10 @@ assert isinstance(terminator, Terminator) self.terminator = terminator - def read(self, obj, selector): - attr = self.find_map_attr(selector) + def read(self, obj, name, index): + attr = self.find_map_attr(name, index) if attr is None: - return self.terminator._read_terminator(obj, selector) + return self.terminator._read_terminator(obj, name, index) if ( jit.isconstant(attr.storageindex) and jit.isconstant(obj) and @@ -47,39 +47,39 @@ def _pure_mapdict_read_storage(self, obj, storageindex): return obj._mapdict_read_storage(storageindex) - def write(self, obj, selector, w_value): - attr = self.find_map_attr(selector) + def write(self, obj, name, index, w_value): + attr = self.find_map_attr(name, index) if attr is None: - return self.terminator._write_terminator(obj, selector, w_value) + return self.terminator._write_terminator(obj, name, index, w_value) if not attr.ever_mutated: attr.ever_mutated = True obj._mapdict_write_storage(attr.storageindex, w_value) return True - def delete(self, obj, selector): + def delete(self, obj, name, index): pass - def find_map_attr(self, selector): + def find_map_attr(self, name, index): if jit.we_are_jitted(): # hack for the jit: # the _find_map_attr method is pure too, but its argument is never # constant, because it is always a new tuple - return self._find_map_attr_jit_pure(selector[0], selector[1]) + return self._find_map_attr_jit_pure(name, index) else: - return self._find_map_attr_indirection(selector) + return self._find_map_attr_indirection(name, index) @jit.elidable def _find_map_attr_jit_pure(self, name, index): - return self._find_map_attr_indirection((name, index)) + return self._find_map_attr_indirection(name, index) @jit.dont_look_inside - def _find_map_attr_indirection(self, selector): + def _find_map_attr_indirection(self, name, index): if (self.space.config.objspace.std.withmethodcache): - return self._find_map_attr_cache(selector) - return self._find_map_attr(selector) + return self._find_map_attr_cache(name, index) + return self._find_map_attr(name, index) @jit.dont_look_inside - def _find_map_attr_cache(self, selector): + def _find_map_attr_cache(self, name, index): space = self.space cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp @@ -87,31 +87,36 @@ attrs_as_int = objectmodel.current_object_addr_as_int(self) # ^^^Note: see comment in typeobject.py for # _pure_lookup_where_with_method_cache() - hash_selector = objectmodel.compute_hash(selector) + + # unrolled hash computation for 2-tuple + c1 = 0x345678 + c2 = 1000003 + hash_name = objectmodel.compute_hash(name) + hash_selector = intmask((c2 * ((c2 * c1) ^ hash_name)) ^ index) product = intmask(attrs_as_int * hash_selector) attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too cached_attr = cache.attrs[attr_hash] if cached_attr is self: - cached_selector = cache.selectors[attr_hash] - if cached_selector == selector: + cached_name = cache.names[attr_hash] + cached_index = cache.indexes[attr_hash] + if cached_name == name and cached_index == index: attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: - name = selector[0] cache.hits[name] = cache.hits.get(name, 0) + 1 return attr - attr = self._find_map_attr(selector) + attr = self._find_map_attr(name, index) cache.attrs[attr_hash] = self - cache.selectors[attr_hash] = selector + cache.names[attr_hash] = name + cache.indexes[attr_hash] = index cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: - name = selector[0] cache.misses[name] = cache.misses.get(name, 0) + 1 return attr - def _find_map_attr(self, selector): + def _find_map_attr(self, name, index): while isinstance(self, PlainAttribute): - if selector == self.selector: + if name == self.name and index == self.index: return self self = self.back return None @@ -137,23 +142,22 @@ @jit.elidable def _get_new_attr(self, name, index): - selector = name, index cache = self.cache_attrs if cache is None: cache = self.cache_attrs = {} - attr = cache.get(selector, None) + attr = cache.get((name, index), None) if attr is None: - attr = PlainAttribute(selector, self) - cache[selector] = attr + attr = PlainAttribute(name, index, self) + cache[name, index] = attr return attr - @jit.look_inside_iff(lambda self, obj, selector, w_value: + @jit.look_inside_iff(lambda self, obj, name, index, w_value: jit.isconstant(self) and - jit.isconstant(selector[0]) and - jit.isconstant(selector[1])) - def add_attr(self, obj, selector, w_value): + jit.isconstant(name) and + jit.isconstant(index)) + def add_attr(self, obj, name, index, w_value): # grumble, jit needs this - attr = self._get_new_attr(selector[0], selector[1]) + attr = self._get_new_attr(name, index) oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): size_est = (oldattr._size_estimate + attr.size_estimate() @@ -189,11 +193,11 @@ AbstractAttribute.__init__(self, space, self) self.w_cls = w_cls - def _read_terminator(self, obj, selector): + def _read_terminator(self, obj, name, index): return None - def _write_terminator(self, obj, selector, w_value): - obj._get_mapdict_map().add_attr(obj, selector, w_value) + def _write_terminator(self, obj, name, index, w_value): + obj._get_mapdict_map().add_attr(obj, name, index, w_value) return True def copy(self, obj): @@ -231,40 +235,40 @@ class NoDictTerminator(Terminator): - def _write_terminator(self, obj, selector, w_value): - if selector[1] == DICT: + def _write_terminator(self, obj, name, index, w_value): + if index == DICT: return False - return Terminator._write_terminator(self, obj, selector, w_value) + return Terminator._write_terminator(self, obj, name, index, w_value) class DevolvedDictTerminator(Terminator): - def _read_terminator(self, obj, selector): - if selector[1] == DICT: + def _read_terminator(self, obj, name, index): + if index == DICT: space = self.space w_dict = obj.getdict(space) - return space.finditem_str(w_dict, selector[0]) - return Terminator._read_terminator(self, obj, selector) + return space.finditem_str(w_dict, name) + return Terminator._read_terminator(self, obj, name, index) - def _write_terminator(self, obj, selector, w_value): - if selector[1] == DICT: + def _write_terminator(self, obj, name, index, w_value): + if index == DICT: space = self.space w_dict = obj.getdict(space) - space.setitem_str(w_dict, selector[0], w_value) + space.setitem_str(w_dict, name, w_value) return True - return Terminator._write_terminator(self, obj, selector, w_value) + return Terminator._write_terminator(self, obj, name, index, w_value) - def delete(self, obj, selector): + def delete(self, obj, name, index): from pypy.interpreter.error import OperationError - if selector[1] == DICT: + if index == DICT: space = self.space w_dict = obj.getdict(space) try: - space.delitem(w_dict, space.wrap(selector[0])) + space.delitem(w_dict, space.wrap(name)) except OperationError, ex: if not ex.match(space, space.w_KeyError): raise return Terminator.copy(self, obj) - return Terminator.delete(self, obj, selector) + return Terminator.delete(self, obj, name, index) def remove_dict_entries(self, obj): assert 0, "should be unreachable" @@ -276,27 +280,28 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'storageindex', 'back', 'ever_mutated?'] + _immutable_fields_ = ['name', 'index', 'storageindex', 'back', 'ever_mutated?'] - def __init__(self, selector, back): + def __init__(self, name, index, back): AbstractAttribute.__init__(self, back.space, back.terminator) - self.selector = selector + self.name = name + self.index = index self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False def _copy_attr(self, obj, new_obj): - w_value = self.read(obj, self.selector) - new_obj._get_mapdict_map().add_attr(new_obj, self.selector, w_value) + w_value = self.read(obj, self.name, self.index) + new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.index, w_value) - def delete(self, obj, selector): - if selector == self.selector: + def delete(self, obj, name, index): + if name == self.name and index == self.index: # ok, attribute is deleted if not self.ever_mutated: self.ever_mutated = True return self.back.copy(obj) - new_obj = self.back.delete(obj, selector) + new_obj = self.back.delete(obj, name, index) if new_obj is not None: self._copy_attr(obj, new_obj) return new_obj @@ -315,14 +320,14 @@ return new_obj def search(self, attrtype): - if self.selector[1] == attrtype: + if self.index == attrtype: return self return self.back.search(attrtype) def materialize_r_dict(self, space, obj, dict_w): new_obj = self.back.materialize_r_dict(space, obj, dict_w) - if self.selector[1] == DICT: - w_attr = space.wrap(self.selector[0]) + if self.index == DICT: + w_attr = space.wrap(self.name) dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) else: self._copy_attr(obj, new_obj) @@ -330,12 +335,12 @@ def remove_dict_entries(self, obj): new_obj = self.back.remove_dict_entries(obj) - if self.selector[1] != DICT: + if self.index != DICT: self._copy_attr(obj, new_obj) return new_obj def __repr__(self): - return "" % (self.selector, self.storageindex, self.back) + return "" % (self.name, self.index, self.storageindex, self.back) def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to @@ -347,8 +352,8 @@ assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE - self._empty_selector = (None, INVALID) - self.selectors = [self._empty_selector] * SIZE + self.names = [None] * SIZE + self.indexes = [INVALID] * SIZE self.cached_attrs = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} @@ -357,8 +362,9 @@ def clear(self): for i in range(len(self.attrs)): self.attrs[i] = None - for i in range(len(self.selectors)): - self.selectors[i] = self._empty_selector + for i in range(len(self.names)): + self.names[i] = None + self.indexes[i] = INVALID for i in range(len(self.cached_attrs)): self.cached_attrs[i] = None @@ -388,20 +394,20 @@ # objspace interface def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, (attrname, DICT)) + return self._get_mapdict_map().read(self, attrname, DICT) def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, (attrname, DICT), w_value) + return self._get_mapdict_map().write(self, attrname, DICT, w_value) def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, (attrname, DICT)) + new_obj = self._get_mapdict_map().delete(self, attrname, DICT) if new_obj is None: return False self._become(new_obj) return True def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, ("dict", SPECIAL)) + w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) if w_dict is not None: assert isinstance(w_dict, W_DictMultiObject) return w_dict @@ -409,7 +415,7 @@ strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) assert flag return w_dict @@ -425,7 +431,7 @@ # shell that continues to delegate to 'self'. if type(w_olddict.get_strategy()) is MapDictStrategy: w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) assert flag def getclass(self, space): @@ -443,16 +449,16 @@ self._init_empty(w_subtype.terminator) def getslotvalue(self, slotindex): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - return self._get_mapdict_map().read(self, key) + index = SLOTS_STARTING_FROM + slotindex + return self._get_mapdict_map().read(self, "slot", index) def setslotvalue(self, slotindex, w_value): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - self._get_mapdict_map().write(self, key, w_value) + index = SLOTS_STARTING_FROM + slotindex + self._get_mapdict_map().write(self, "slot", index, w_value) def delslotvalue(self, slotindex): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - new_obj = self._get_mapdict_map().delete(self, key) + index = SLOTS_STARTING_FROM + slotindex + new_obj = self._get_mapdict_map().delete(self, "slot", index) if new_obj is None: return False self._become(new_obj) @@ -462,7 +468,7 @@ def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline - lifeline = self._get_mapdict_map().read(self, ("weakref", SPECIAL)) + lifeline = self._get_mapdict_map().read(self, "weakref", SPECIAL) if lifeline is None: return None assert isinstance(lifeline, WeakrefLifeline) @@ -472,11 +478,11 @@ def setweakref(self, space, weakreflifeline): from pypy.module._weakref.interp__weakref import WeakrefLifeline assert isinstance(weakreflifeline, WeakrefLifeline) - self._get_mapdict_map().write(self, ("weakref", SPECIAL), weakreflifeline) + self._get_mapdict_map().write(self, "weakref", SPECIAL, weakreflifeline) setweakref._cannot_really_call_random_things_ = True def delweakref(self): - self._get_mapdict_map().write(self, ("weakref", SPECIAL), None) + self._get_mapdict_map().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True class ObjectMixin(object): @@ -721,7 +727,7 @@ curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT) if curr is None: raise KeyError - key = curr.selector[0] + key = curr.name w_value = self.getitem_str(w_dict, key) w_key = self.space.wrap(key) self.delitem(w_dict, w_key) @@ -758,7 +764,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name w_attr = self.space.wrap(attr) return w_attr return None @@ -780,7 +786,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name return self.w_obj.getdictvalue(self.space, attr) return None @@ -801,7 +807,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name w_attr = self.space.wrap(attr) return w_attr, self.w_obj.getdictvalue(self.space, attr) return None, None @@ -884,9 +890,9 @@ _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # - selector = ("", INVALID) + attrname, index = ("", INVALID) if w_descr is None: - selector = (name, DICT) # common case: no such attr in the class + attrname, index = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, MutableCell): pass # we have a MutableCell in the class: give up elif space.is_data_descr(w_descr): @@ -894,20 +900,21 @@ # (if any) has no relevance. from pypy.interpreter.typedef import Member if isinstance(w_descr, Member): # it is a slot -- easy case - selector = ("slot", SLOTS_STARTING_FROM + w_descr.index) + attrname, index = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. If there is # also a dict attribute, use the latter, caching its storageindex. # If not, we loose. We could do better in this case too, # but we don't care too much; the common case of a method # invocation is handled by LOOKUP_METHOD_xxx below. - selector = (name, DICT) + attrname = name + index = DICT # - if selector[1] != INVALID: - attr = map.find_map_attr(selector) + if index != INVALID: + attr = map.find_map_attr(attrname, index) if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator, - # map.find_map_attr will always return None if selector[1]==DICT. + # map.find_map_attr will always return None if index==DICT. _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) return w_obj._mapdict_read_storage(attr.storageindex) if space.config.objspace.std.withmethodcachecounter: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -34,8 +34,8 @@ def test_plain_attribute(): w_cls = "class" - aa = PlainAttribute(("b", DICT), - PlainAttribute(("a", DICT), + aa = PlainAttribute("b", DICT, + PlainAttribute("a", DICT, Terminator(space, w_cls))) assert aa.space is space assert aa.terminator.w_cls is w_cls @@ -63,16 +63,16 @@ def test_huge_chain(): current = Terminator(space, "cls") for i in range(20000): - current = PlainAttribute((str(i), DICT), current) - assert current.find_map_attr(("0", DICT)).storageindex == 0 + current = PlainAttribute(str(i), DICT, current) + assert current.find_map_attr("0", DICT).storageindex == 0 def test_search(): - aa = PlainAttribute(("b", DICT), PlainAttribute(("a", DICT), Terminator(None, None))) + aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(None, None))) assert aa.search(DICT) is aa assert aa.search(SLOTS_STARTING_FROM) is None assert aa.search(SPECIAL) is None - bb = PlainAttribute(("C", SPECIAL), PlainAttribute(("A", SLOTS_STARTING_FROM), aa)) + bb = PlainAttribute("C", SPECIAL, PlainAttribute("A", SLOTS_STARTING_FROM, aa)) assert bb.search(DICT) is aa assert bb.search(SLOTS_STARTING_FROM) is bb.back assert bb.search(SPECIAL) is bb @@ -320,7 +320,7 @@ d = {} w_d = FakeDict(d) - flag = obj.map.write(obj, ("dict", SPECIAL), w_d) + flag = obj.map.write(obj, "dict", SPECIAL, w_d) assert flag materialize_r_dict(space, obj, d) assert d == {"a": 5, "b": 6, "c": 7} From pypy.commits at gmail.com Wed Jan 20 04:26:39 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 01:26:39 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: missing import for llop Message-ID: <569f52cf.552f1c0a.1091.59b9@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81875:cce5159ef75d Date: 2016-01-20 10:25 +0100 http://bitbucket.org/pypy/pypy/changeset/cce5159ef75d/ Log: missing import for llop diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -7,6 +7,7 @@ from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch import locations as l from rpython.jit.backend.zarch.pool import LiteralPool +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.backend.zarch.codebuilder import (InstrBuilder, OverwritingBuilder) from rpython.jit.backend.zarch.helper.regalloc import check_imm_value From pypy.commits at gmail.com Wed Jan 20 07:49:25 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 04:49:25 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: literal pool enhancement. it now stores unique values, no 64 bit integer/float/ref is every stored twice in the pool Message-ID: <569f8255.8f7e1c0a.d8754.ffffa392@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81876:30073c0bccb6 Date: 2016-01-20 13:48 +0100 http://bitbucket.org/pypy/pypy/changeset/30073c0bccb6/ Log: literal pool enhancement. it now stores unique values, no 64 bit integer/float/ref is every stored twice in the pool diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -10,6 +10,9 @@ RECOVERY_GCMAP_POOL_OFFSET, RECOVERY_TARGET_POOL_OFFSET) from rpython.rlib.longlong2float import float2longlong +class PoolOverflow(Exception): + pass + class LiteralPool(object): def __init__(self): self.size = 0 @@ -17,7 +20,10 @@ self.pool_start = 0 self.label_offset = 0 self.label_count = 0 + # for constant offsets self.offset_map = {} + # for descriptors + self.offset_descr = {} self.constant_64_zeros = -1 self.constant_64_ones = -1 self.constant_64_sign_bit = -1 @@ -28,19 +34,21 @@ if op.is_guard(): # 1x gcmap pointer # 1x target address - self.offset_map[op.getdescr()] = self.size - self.reserve_literal(2 * 8) + self.offset_descr[op.getdescr()] = self.size + self.allocate_slot(2*8) elif op.getopnum() == rop.JUMP: descr = op.getdescr() if descr not in asm.target_tokens_currently_compiling: # this is a 'long' jump instead of a relative jump - self.offset_map[descr] = self.size - self.reserve_literal(8) + self.offset_descr[descr] = self.size + self.allocate_slot(8) elif op.getopnum() == rop.LABEL: descr = op.getdescr() if descr not in asm.target_tokens_currently_compiling: # this is a 'long' jump instead of a relative jump - self.offset_map[descr] = self.size + # TODO why no reserve literal? self.offset_map[descr] = self.size + self.offset_descr[descr] = self.size + self.allocate_slot(8) elif op.getopnum() == rop.INT_INVERT: self.constant_64_ones = 1 # we need constant ones!!! elif op.getopnum() == rop.INT_MUL_OVF: @@ -50,18 +58,15 @@ opnum == rop.UINT_RSHIFT: a0 = op.getarg(0) if a0.is_constant(): - self.offset_map[a0] = self.size - self.reserve_literal(8) + self.reserve_literal(8, a0) return elif opnum == rop.GC_STORE or opnum == rop.GC_STORE_INDEXED: arg = op.getarg(0) if arg.is_constant(): - self.offset_map[arg] = self.size - self.reserve_literal(8) + self.reserve_literal(8, arg) arg = op.getarg(2) if arg.is_constant(): - self.offset_map[arg] = self.size - self.reserve_literal(8) + self.reserve_literal(8, arg) return elif opnum in (rop.GC_LOAD_F, rop.GC_LOAD_I, @@ -71,27 +76,42 @@ rop.GC_LOAD_INDEXED_I,): arg = op.getarg(0) if arg.is_constant(): - self.offset_map[arg] = self.size - self.reserve_literal(8) + self.reserve_literal(8, arg) + if opnum == rop.GC_LOAD_INDEXED_R: + arg = op.getarg(1) + if arg.is_constant(): + self.reserve_literal(8, arg) return elif op.is_call_release_gil(): for arg in op.getarglist()[1:]: if arg.is_constant(): - self.offset_map[arg] = self.size - self.reserve_literal(8) + self.reserve_literal(8, arg) return for arg in op.getarglist(): if arg.is_constant(): - self.offset_map[arg] = self.size - self.reserve_literal(8) + self.reserve_literal(8, arg) def get_offset(self, box): + assert box.is_constant() + uvalue = self.unique_value(box) if not we_are_translated(): - assert self.offset_map[box] >= 0 - return self.offset_map[box] + assert self.offset_map[uvalue] >= 0 + return self.offset_map[uvalue] - def reserve_literal(self, size): - self.size += size + def unique_value(self, val): + if val.type == FLOAT: + return float2longlong(val.getfloat()) + elif val.type == INT: + return rffi.cast(lltype.Signed, val.getint()) + else: + assert val.type == REF + return rffi.cast(lltype.Signed, val.getref_base()) + + def reserve_literal(self, size, box): + uvalue = self.unique_value(box) + if uvalue not in self.offset_map: + self.offset_map[uvalue] = self.size + self.allocate_slot(size) def reset(self): self.pool_start = 0 @@ -103,6 +123,26 @@ self.constant_64_sign_bit = -1 self.constant_max_64_positive -1 + def check_size(self, size=-1): + if size == -1: + size = self.size + if size >= 2**19: + msg = '[S390X/literalpool] size exceeded %d >= %d\n' % (size, 2**19-8) + if we_are_translated(): + llop.debug_print(lltype.Void, msg) + raise PoolOverflow(msg) + + def allocate_slot(self, size): + val = self.size + size + self.check_size(val) + self.size = val + + def ensure_value(self, val): + if val not in self.offset_map: + self.offset_map[val] = self.size + self.allocate_slot(8) + return self.offset_map[val] + def pre_assemble(self, asm, operations, bridge=False): # O(len(operations)). I do not think there is a way # around this. @@ -110,9 +150,9 @@ # Problem: # constants such as floating point operations, plain pointers, # or integers might serve as parameter to an operation. thus - # it must be loaded into a register. You cannot do this with - # assembler immediates, because the biggest immediate value - # is 32 bit for branch instructions. + # it must be loaded into a register. There is a space benefit + # for 64-bit integers, or python floats, when a constant is used + # twice. # # Solution: # the current solution (gcc does the same), use a literal pool @@ -125,25 +165,23 @@ # no pool needed! return assert self.size % 2 == 0, "not aligned properly" - asm.mc.write('\x00' * self.size) - written = 0 if self.constant_64_ones != -1: - asm.mc.write('\xFF' * 8) - self.constant_64_ones = self.size + written - written += 8 + self.constant_64_ones = self.ensure_value(0xffffFFFFffffFFFF) if self.constant_64_zeros != -1: - asm.mc.write('\x00' * 8) - self.constant_64_zeros = self.size + written - written += 8 + self.constant_64_zeros = self.ensure_value(0x0) if self.constant_64_sign_bit != -1: - asm.mc.write('\x80' + ('\x00' * 7)) - self.constant_64_sign_bit = self.size + written - written += 8 + self.constant_64_zeros = self.ensure_value(0x8000000000000000) if self.constant_max_64_positive != -1: - asm.mc.write('\x7F' + ('\xFF' * 7)) - self.constant_max_64_positive = self.size + written - written += 8 - self.size += written + self.constant_max_64_positive = self.ensure_value(0x7fffFFFFffffFFFF) + wrote = 0 + for val, offset in self.offset_map.items(): + if not we_are_translated(): + print('pool: %s at offset: %d' % (val, offset)) + self.mc.write_i64() + wrote += 8 + self.offset_map = {} + # for the descriptors + asm.mc.write('\x00' * (self.size - wrote)) if not we_are_translated(): print "pool with %d quad words" % (self.size // 8) @@ -163,24 +201,11 @@ pending_guard_tokens = asm.pending_guard_tokens if self.size == 0: return - for val, offset in self.offset_map.items(): - if not we_are_translated(): - print('pool: %s at offset: %d' % (val, offset)) - if val.is_constant(): - if val.type == FLOAT: - self.overwrite_64(mc, offset, float2longlong(val.getfloat())) - elif val.type == INT: - i64 = rffi.cast(lltype.Signed, val.getint()) - self.overwrite_64(mc, offset, i64) - else: - assert val.type == REF - i64 = rffi.cast(lltype.Signed, val.getref_base()) - self.overwrite_64(mc, offset, i64) - for guard_token in pending_guard_tokens: descr = guard_token.faildescr - offset = self.offset_map[descr] + offset = self.offset_descr[descr] assert isinstance(offset, int) + assert offset >= 0 guard_token._pool_offset = offset ptr = rffi.cast(lltype.Signed, guard_token.gcmap) self.overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr) diff --git a/rpython/jit/backend/zarch/test/test_pool.py b/rpython/jit/backend/zarch/test/test_pool.py --- a/rpython/jit/backend/zarch/test/test_pool.py +++ b/rpython/jit/backend/zarch/test/test_pool.py @@ -1,5 +1,5 @@ import py -from rpython.jit.backend.zarch.pool import LiteralPool +from rpython.jit.backend.zarch.pool import LiteralPool, PoolOverflow from rpython.jit.metainterp.history import (AbstractFailDescr, AbstractDescr, BasicFailDescr, BasicFinalDescr, JitCellToken, TargetToken, ConstInt, ConstPtr, Const, ConstFloat) @@ -10,6 +10,7 @@ from rpython.jit.backend.zarch.helper.regalloc import check_imm32 from rpython.jit.backend.zarch.assembler import AssemblerZARCH from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.jit.tool.oparser import parse class TestPoolZARCH(object): def setup_class(self): @@ -18,6 +19,8 @@ def setup_method(self, name): self.pool = LiteralPool() self.asm = None + self.cpu = getcpuclass()(None, None) + self.cpu.setup_once() def ensure_can_hold(self, opnum, args, descr=None): op = ResOperation(opnum, args, descr=descr) @@ -26,9 +29,9 @@ def const_in_pool(self, c): try: self.pool.get_offset(c) + return True except KeyError: return False - return True def test_constant_in_call_malloc(self): c = ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef)) @@ -42,18 +45,14 @@ for c1 in [ConstInt(1), ConstInt(2**44), InputArgInt(1)]: for c2 in [InputArgInt(1), ConstInt(1), ConstInt(2**55)]: self.ensure_can_hold(opnum, [c1,c2]) - if c1.is_constant() and check_imm32(c1): + if c1.is_constant(): assert self.const_in_pool(c1) - else: - assert not self.const_in_pool(c1) - if c2.is_constant() and check_imm32(c2): + if c2.is_constant(): assert self.const_in_pool(c2) - else: - assert not self.const_in_pool(c2) def test_pool_overflow(self): - cpu = getcpuclass()(None, None) - cpu.setup_once() - ops = [ResOperation(rop.FLOAT_ADD, [ConstFloat(0.0125), ConstFloat(float(i))]) for i in range(100)] - cpu.compile_loop([], ops, JitCellToken()) - + self.pool.size = (2**19-1) - 8 + self.pool.allocate_slot(8) + assert self.pool.size == 2**19-1 + with py.test.raises(PoolOverflow) as of: + self.pool.allocate_slot(8) From pypy.commits at gmail.com Wed Jan 20 08:04:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 05:04:07 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: some translation issues Message-ID: <569f85c7.878e1c0a.5ceee.ffffad26@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81877:f21a9cb9df2a Date: 2016-01-20 14:03 +0100 http://bitbucket.org/pypy/pypy/changeset/f21a9cb9df2a/ Log: some translation issues diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -3,6 +3,7 @@ from rpython.jit.metainterp.history import (INT, REF, FLOAT, TargetToken) from rpython.rlib.objectmodel import we_are_translated +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import Const from rpython.rtyper.lltypesystem import lltype, rffi, llmemory @@ -166,18 +167,18 @@ return assert self.size % 2 == 0, "not aligned properly" if self.constant_64_ones != -1: - self.constant_64_ones = self.ensure_value(0xffffFFFFffffFFFF) + self.constant_64_ones = self.ensure_value(-1) if self.constant_64_zeros != -1: self.constant_64_zeros = self.ensure_value(0x0) if self.constant_64_sign_bit != -1: - self.constant_64_zeros = self.ensure_value(0x8000000000000000) + self.constant_64_zeros = self.ensure_value(-2**63) # == 0x8000000000000000 if self.constant_max_64_positive != -1: self.constant_max_64_positive = self.ensure_value(0x7fffFFFFffffFFFF) wrote = 0 for val, offset in self.offset_map.items(): if not we_are_translated(): print('pool: %s at offset: %d' % (val, offset)) - self.mc.write_i64() + asm.mc.write_i64(val) wrote += 8 self.offset_map = {} # for the descriptors From pypy.commits at gmail.com Wed Jan 20 08:18:22 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 05:18:22 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: did not write pool entires to the right location Message-ID: <569f891e.cb571c0a.a54d.ffffb2d7@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81878:1dcc4c9a8961 Date: 2016-01-20 14:17 +0100 http://bitbucket.org/pypy/pypy/changeset/1dcc4c9a8961/ Log: did not write pool entires to the right location diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1181,7 +1181,7 @@ self.mc.b_offset(descr._ll_loop_code + self.mc.LARL_byte_count) else: # restore the pool address - offset = self.pool.get_offset(descr) + \ + offset = self.pool.get_descr_offset(descr) + \ JUMPABS_TARGET_ADDR__POOL_OFFSET offset_pool = offset + JUMPABS_POOL_ADDR_POOL_OFFSET self.mc.LG(r.SCRATCH, l.pool(offset)) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -92,6 +92,9 @@ if arg.is_constant(): self.reserve_literal(8, arg) + def get_descr_offset(self, descr): + return self.offset_descr[descr] + def get_offset(self, box): assert box.is_constant() uvalue = self.unique_value(box) @@ -171,18 +174,17 @@ if self.constant_64_zeros != -1: self.constant_64_zeros = self.ensure_value(0x0) if self.constant_64_sign_bit != -1: - self.constant_64_zeros = self.ensure_value(-2**63) # == 0x8000000000000000 + self.constant_64_sign_bit = self.ensure_value(-2**63) # == 0x8000000000000000 if self.constant_max_64_positive != -1: self.constant_max_64_positive = self.ensure_value(0x7fffFFFFffffFFFF) + asm.mc.write('\x00' * self.size) wrote = 0 for val, offset in self.offset_map.items(): if not we_are_translated(): print('pool: %s at offset: %d' % (val, offset)) - asm.mc.write_i64(val) + self.overwrite_64(asm.mc, offset, val) wrote += 8 - self.offset_map = {} # for the descriptors - asm.mc.write('\x00' * (self.size - wrote)) if not we_are_translated(): print "pool with %d quad words" % (self.size // 8) From pypy.commits at gmail.com Wed Jan 20 09:06:33 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 06:06:33 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed a pool issue where a guard token did not receive it's rightful pool position Message-ID: <569f9469.44e21c0a.53a83.ffffc04d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81879:eb2fcf893ee0 Date: 2016-01-20 15:05 +0100 http://bitbucket.org/pypy/pypy/changeset/eb2fcf893ee0/ Log: fixed a pool issue where a guard token did not receive it's rightful pool position diff --git a/rpython/jit/backend/test/zll_stress.py b/rpython/jit/backend/test/zll_stress.py --- a/rpython/jit/backend/test/zll_stress.py +++ b/rpython/jit/backend/test/zll_stress.py @@ -19,4 +19,5 @@ r = Random() r.jumpahead(piece*99999999) for i in range(piece*per_piece, (piece+1)*per_piece): + print " i = %d; r.setstate(%s)" % (i, r.getstate()) check_random_function(cpu, LLtypeOperationBuilder, r, i, total_iterations) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -132,7 +132,7 @@ fail_descr, target = self.store_info_on_descr(startpos, guardtok) assert target != 0 pool_offset = guardtok._pool_offset - + assert pool_offset != -1 # overwrite the gcmap in the jitframe offset = pool_offset + RECOVERY_GCMAP_POOL_OFFSET diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -641,6 +641,7 @@ token = ZARCHGuardToken(self.cpu, gcmap, descr, op.getfailargs(), arglocs, op.getopnum(), frame_depth, fcond) + token._pool_offset = self.pool.get_descr_offset(descr) return token def emit_guard_true(self, op, arglocs, regalloc): diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -125,7 +125,7 @@ self.constant_64_zeros = -1 self.constant_64_ones = -1 self.constant_64_sign_bit = -1 - self.constant_max_64_positive -1 + self.constant_max_64_positive = -1 def check_size(self, size=-1): if size == -1: @@ -190,6 +190,7 @@ def overwrite_64(self, mc, index, value): index += self.pool_start + mc.overwrite(index, chr(value >> 56 & 0xff)) mc.overwrite(index+1, chr(value >> 48 & 0xff)) mc.overwrite(index+2, chr(value >> 40 & 0xff)) @@ -209,6 +210,6 @@ offset = self.offset_descr[descr] assert isinstance(offset, int) assert offset >= 0 - guard_token._pool_offset = offset + assert guard_token._pool_offset != -1 ptr = rffi.cast(lltype.Signed, guard_token.gcmap) self.overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr) From pypy.commits at gmail.com Wed Jan 20 09:35:12 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 20 Jan 2016 06:35:12 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: rbigint frombytes/tobytes is specific to little endian Message-ID: <569f9b20.953f1c0a.3b95c.ffffbe71@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81880:ef5a0afd8184 Date: 2016-01-20 15:34 +0100 http://bitbucket.org/pypy/pypy/changeset/ef5a0afd8184/ Log: rbigint frombytes/tobytes is specific to little endian diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -296,7 +296,7 @@ if not s: return NULLRBIGINT - if byteorder != BYTEORDER: + if byteorder == 'big': msb = ord(s[0]) itr = range(len(s)-1, -1, -1) else: @@ -336,7 +336,7 @@ if not signed and self.sign == -1: raise InvalidSignednessError() - bswap = byteorder != BYTEORDER + bswap = byteorder == 'big' d = _widen_digit(0) j = 0 imax = self.numdigits() From pypy.commits at gmail.com Wed Jan 20 15:47:51 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 20 Jan 2016 12:47:51 -0800 (PST) Subject: [pypy-commit] benchmarks default: add dropbox/pyxl benchmark Message-ID: <569ff277.01adc20a.fd20d.ffffcfea@mx.google.com> Author: fijal Branch: Changeset: r345:283ed0844257 Date: 2016-01-20 21:47 +0100 http://bitbucket.org/pypy/benchmarks/changeset/283ed0844257/ Log: add dropbox/pyxl benchmark diff too long, truncating to 2000 out of 5559 lines diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -60,6 +60,7 @@ opts = { 'gcbench' : {'iteration_scaling' : .10}, 'pidigits': {'iteration_scaling' : .10}, + 'pyxl_bench': {'bm_env': {'PYTHONPATH': relative('lib/pyxl')}}, 'eparse' : {'bm_env': {'PYTHONPATH': relative('lib/monte')}}, 'bm_mako' : {'bm_env': {'PYTHONPATH': relative('lib/mako')}}, 'bm_dulwich_log': {'bm_env': {'PYTHONPATH': relative('lib/dulwich-0.9.1')}}, @@ -83,7 +84,7 @@ 'raytrace-simple', 'crypto_pyaes', 'bm_mako', 'bm_chameleon', 'json_bench', 'pidigits', 'hexiom2', 'eparse', 'deltablue', 'bm_dulwich_log', 'bm_krakatau', 'bm_mdp', 'pypy_interp', - 'sqlitesynth']: + 'sqlitesynth', 'pyxl_bench']: _register_new_bm(name, name, globals(), **opts.get(name, {})) for name in ['names', 'iteration', 'tcp', 'pb', ]:#'web']:#, 'accepts']: diff --git a/lib/pyxl/LICENSE b/lib/pyxl/LICENSE new file mode 100644 --- /dev/null +++ b/lib/pyxl/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/lib/pyxl/MANIFEST b/lib/pyxl/MANIFEST new file mode 100644 --- /dev/null +++ b/lib/pyxl/MANIFEST @@ -0,0 +1,19 @@ +# file GENERATED by distutils, do NOT edit +README +finish_install.py +pyxl.pth +setup.py +emacs/pyxl-mode.el +pyxl/__init__.py +pyxl/base.py +pyxl/element.py +pyxl/html.py +pyxl/utils.py +pyxl/codec/__init__.py +pyxl/codec/parser.py +pyxl/codec/register.py +pyxl/codec/tokenizer.py +pyxl/examples/__init__.py +pyxl/examples/hello_world.py +pyxl/scripts/__init__.py +pyxl/scripts/parse_file.py diff --git a/lib/pyxl/MANIFEST.in b/lib/pyxl/MANIFEST.in new file mode 100644 --- /dev/null +++ b/lib/pyxl/MANIFEST.in @@ -0,0 +1,2 @@ +include README pyxl.pth finish_install.py +recursive-include emacs *.el diff --git a/lib/pyxl/README b/lib/pyxl/README new file mode 120000 --- /dev/null +++ b/lib/pyxl/README @@ -0,0 +1,1 @@ +README.md \ No newline at end of file diff --git a/lib/pyxl/README.md b/lib/pyxl/README.md new file mode 100644 --- /dev/null +++ b/lib/pyxl/README.md @@ -0,0 +1,235 @@ +Pyxl is an open source package that extends Python to support inline HTML. It converts HTML fragments into valid Python expressions, and is meant as a replacement for traditional python templating systems like [Mako](http://www.makotemplates.org/) or [Cheetah](http://www.cheetahtemplate.org/). It automatically escapes data, enforces correct markup and makes it easier to write reusable and well structured UI code. Pyxl was inspired by the [XHP](https://github.com/facebook/xhp/wiki) project at Facebook. + +This project only supports Python 2. However, a [Python 3 fork](https://github.com/gvanrossum/pyxl3) is available. + +## Motivation + +At Cove, where Pyxl was developed, we found that using templates was getting in the way of quickly building new features. There were the usual issues of remembering to escape data to prevent XSS holes, avoiding invalid markup and deciphering cryptic stack traces. More importantly, our templates were getting hard to manage and understand which made iterating on our product more work than should be necessary. + +Existing templating systems do support things like logic and reusable modules - but they are essentially like having a different programming language for writing UI which falls well short of python itself. The primary reason templating systems exist is because creating HTML in languages like python means writing crazy string manipulation code, or losing the niceness of writing actual HTML by doing something like this: + +```py +import html +print ( + html.head().appendChild( + html.body().appendChild( + html.text("Hello World!")))) +``` + +To get around these limitations, we developed Pyxl which allowed us to treat HTML as a part of the python language itself. So, writing the above example with Pyxl would look like: + +```py +# coding: pyxl +print Hello World! +``` + +This meant no longer dealing with a separate "templating" language, and a lot more control over how we wrote our front-end code. Also, since Pyxl maps HTML to structured python objects and expressions instead of arbitrary blobs of strings, adding support for things like automatically escaping data was trivial. Switching to Pyxl led to much cleaner and modularized UI code, and allowed us to write new features and pages a lot quicker. + +## Installation + +Clone the repo and run the following commands from the directory you cloned to. + +```sh +python setup.py build +sudo python setup.py install +sudo python finish_install.py +``` + +To confirm that Pyxl was correctly installed, run the following command from the same directory: + +```sh +python pyxl/examples/hello_world.py +``` + +You should see the string `Hello World!` printed out. Thats it! You're ready to use Pyxl. + +## Running the tests + +After installing pyxl: + +```sh +easy_install unittest2 +python pyxl_tests.py +``` + +## How it works + +Pyxl converts HTML tags into python objects before the file is run through the interpreter, so the code that actually runs is regular python. For example, the `Hello World` example above is converted into: + +```py +print x_head().append_children(x_body().append_children("Hello World!")) +``` + +Pyxl's usefulness comes from being able to write HTML rather than unwieldy object instantiations and function calls. Note that Pyxl automatically adds objects for all HTML tags to Python builtins, so there is no need to import `x_head` or `x_body` in the example above. + +The conversion to Python is relatively straightforward: Opening tags are converted into object instantiations for the respective tag, nested tags are passed in as arguments to the `append_children` method, and closing tags close the bracket to the `append_children` call. As a result, a big advantage of this is that stack traces on errors map directly to what you've written. To learn more about how Pyxl does this, see the **Implementation Details** section below. + +## Documentation + +All python files with inline HTML must have the following first line: + +```py +# coding: pyxl +``` + +With that, you can start using HTML in your python file. + +### Inline Python Expressions + +Anything wrapped with {}'s is evaluated as a python expression. Please note that attribute values must be wrapped inside quotes, regardless of whether it contains a python expression or not. When used in attribute values, the python expression must evaluate to something that can be cast to unicode. When used inside a tag, the expression can evaluate to anything that can be cast to unicode, an HTML tag, or a list containing those two types. This is demonstrated in the example below: + +```py +image_name = "bolton.png" +image = + +text = "Michael Bolton" +block =
    {image}{text}
    + +element_list = [image, text] +block2 =
    {element_list}
    +``` + +### Dynamic Elements + +Pyxl converts tags into python objects in the background, which inherit from a class called [`x_base`](https://github.com/dropbox/pyxl/blob/master/pyxl/pyxl/base.py). This means that tags have certain methods you can call on them. Here is an example snippet that uses the `append` function to dynamically create an unordered list. + +```py +items = ['Puppies', 'Dragons'] +nav =
      +for text in items: + nav.append(
    • {text}
    • ) +``` + +Another useful function is `children()`, which returns a list of all the child nodes for an element. `children()` accepts an optional selector string as an argument to filter the children. Currently, there is only support for filtering the children by a class (format: ".class_name"), id (format: "#id_string") or tag name. Here is a snippet which adds all `input` elements from an existing form to a new form: + +```py +new_form =
      {old_form.children("input")}
      +``` + +### Attributes + +You can access any attribute of a tag as a member variable on the tag, or via the `attr(attr_name)` function. Setting attribute must happen via the `set_attr(attr_name, attr_value)` function i.e. do not set attrs by directly setting member variables. To access attributes that contain '-' (hypen) as a member variable, replace the hypen with '_' (underscore). For this reason, pyxl does not allow attributes with an underscore in their name. Here is an example that demonstrates all these principles: + +```py +fruit =
      +print fruit.data_text +fruit.set_attr('data-text', 'clementine') +print fruit.attr('data-text') # alternate method for accessing attributes +``` + +### Escaping + +Pyxl automatically escapes all data and attribute values, therefore all your markup is XSS safe by default. One can explicitly avoid escaping by wrapping data in a call to `rawhtml`, but that only applies to data inside a tag. Everything in attribute values is always escaped. Note that static text inside tags (i.e. anything not inside {}'s) is considered regular HTML and is not escaped. + +```py +safe_value = "Puppies!" +unsafe_value = "" +unsafe_attr = '">' +print (
      + {unsafe_value} + {rawhtml(safe_value)} +
      ) +``` + +The above script will print out: + +```html +
      + <script>bad();</script> + Puppies! +
      +``` + +### UI Modules + +UI Modules are especially useful for creating re-usable building blocks in your application, making it quicker to implement new features, and keeping the UI consistent. Pyxl thinks of UI modules as user defined HTML tags, and so they are used just like you would use a `
      ` or any other tag. + +Creating UI modules in Pyxl simply means creating a class that inherits from [`x_element`](https://github.com/dropboxe/pyxl/blob/master/pyxl/pyxl/element.py) and implements the `render()` method. Modules must be prefixed with `x_`. This is an arbitrary requirement, but is useful in separating out pyxl modules from other things. + +Arguments to a UI module are passed as attributes to the UI module tag. Attribute values for these tags need not evaluate to samething that can be cast to unicode, ONLY if the attribute value is a single python expression i.e. the only thing inside the quotes is a {} wrapped python expression. This allows one to pass in any type to a UI module. To demonstrate, a useful UI module is a user badge, which displays a user profile picture with the user's name and some arbitrary content to the right of it: + +```py +# coding: pyxl +from pyxl.element import x_element + +class x_user_badge(x_element): + __attrs__ = { + 'user': object, + } + def render(self): + return ( +
      + +
      +
      {self.user.name}
      + {self.children()} +
      +
      ) +``` + +This makes the tag `` available to us which accepts `user` as an attribute which is an object that contains the user's name and profile picture. Here is an example of this new UI module being used. + +```py +# coding: pyxl +from some_module import x_user_badge + +user = User.get(some_user_id) +content =
      Any arbitrary content...
      +print {content} +``` + +Some things to note about UI modules. + +* Modules names must begin with `x_` and be an instance of `x_element` +* Modules must specify the attributes they accept via the `__attrs__` class variable. This is a dictionary where the key is the attribute name, and the value is the attribute type. Passing an attribute that is not listed in `__attrs__` will result in an error. The only exceptions are attributes accepted by all pyxl elements i.e. id, class, style, onclick, title and anything prefixed with "data-" or "aria-" +* Providing a `class` attribute for a UI module element will automatically append the class string to the underlying HTML element the UI module renders. This is useful when you want to style UI modules differently based on where it is being rendered. + +### Fragments + +The [`pyxl.html`](https://github.com/dropbox/pyxl/blob/master/pyxl/pyxl/html.py) module provides the `` tag, which allows one to group a set of HTML tags without a parent. Rendering the `` tag simply renders all the children, and doesn't add to the markup. + +### Conditional HTML + +Pyxl avoids support for logic within the HTML flow, except for one case where we found it especially useful: conditionally rendering HTML. That is why Pyxl provides the `` tag, which takes an attr called `cond`. Children of an `` are only rendered if `cond` evaluates to True. + +## Implementation Details + +### Parsing + +Pyxl uses support for specifying source code encodings as described in [PEP 263](http://www.python.org/dev/peps/pep-0263/) to do what it does. The functionality was originally provided so that python developers could write code in non-ascii languages (eg. chinese variable names). Pyxl creates a custom encoding called pyxl which allows it to convert XML into regular python before the file is compiled. Once the pyxl codec is registered, any file starting with `# coding: pyxl` is run through the pyxl parser before compilation. + +To register the pyxl codec, one must import the [`pyxl.codec.register`](https://github.com/dropbox/pyxl/blob/master/pyxl/pyxl/codec/register.py) module. The **Installation Process** makes it so that this always happens at python startup via the final `sudo python finish_install.py` step. What this step is doing is adding a file called `pyxl.pth` in your python site-packages directory, which imports the `pyxl.codec.register` module. Anything with a `.pth` extension in the site-packages directory is run automatically at python startup. Read more about that [here](http://docs.python.org/library/site.html). + +Some people may prefer avoiding adding pyxl.pth to their site-packages directory, in which case they should skip the final step of the installation process and explicitly import `pyxl.codec.register` in the entry point of their application. + +The pyxl encoding is a wrapper around utf-8, but every time it encounters a blob of HTML in the file, it runs it through python's [`HTMLParser`](http://docs.python.org/library/htmlparser.html) and replaces the HTML with python objects. As explained above, opening tags are converted into object instantiations for the respective tag, nested tags are passed in as arguments to the `append_children` method, and closing tags close the bracket to the `append_children` call. The code for these conversions can be seen [here](https://github.com/dropbox/pyxl/blob/master/pyxl/pyxl/codec/parser.py). + +### HTML Objects + +Though the syntactic sugar of being able to write HTML in python is pyxl's biggest usefulness, pyxl does also provide a basic framework for dealing with HTML tags as objects. This is not a full DOM implementation, but provides most of the necessary functionality. All the basic HTML tags are represented by objects defined in the [`pyxl.html`](https://github.com/dropbox/pyxl/blob/master/pyxl/pyxl/html.py) module, all of which inherit from the [`x_base`](https://github.com/dropbox/pyxl/blob/master/pyxl/pyxl/base.py) class. + +An HTML tag is rendered by calling the `to_string()` method (called automatically when tags are cast to strings), which recursively calls `to_string()` on all its children. Therefore, it should be noted that almost all the work happens only once `to_string()` is called. It is also at this stage where attribute values and data is escaped. Most of the work consists of string concatenations, and performance based on applications we've written is equivalent to templating engines like Cheetah. Note that there is probably some low hanging fruit in performance improvements that we haven't looked in to (mostly because it hasn't been a problem). + +## Editor Support + +### Emacs + +Grab pyxl-mode.el from the downloaded package under `pyxl/emacs/pyxl-mode.el` or copy it from [here](https://github.com/dropbox/pyxl/blob/master/emacs/pyxl-mode.el). To install, drop the file anywhere on your load path, and add the following to your ~/.emacs file (GNU Emacs) or ~/.xemacs/init.el file (XEmacs): + +```py +(autoload 'pyxl-mode "pyxl-mode" "Major mode for editing pyxl" t) +(setq auto-mode-alist + (cons '("\\.py\\'" . pyxl-mode) auto-mode-alist)) +``` + +### Vim + +Pyxl detection, syntax, and indent files are in the `vim` directory. The easiest way to install the vim support is via [pathogen](https://github.com/tpope/vim-pathogen); with pathogen, you can simply link or copy the directory into your bundle directory. Without pathogen, place the various files in the corresponding subdirectories of your .vim directory. + +### Pycharm + +See [pycharm-pyxl](https://github.com/christoffer/pycharm-pyxl). + +### Sublime Text + +See [sublime-pyxl](https://github.com/yyjhao/sublime-pyxl). diff --git a/lib/pyxl/emacs/pyxl-mode.el b/lib/pyxl/emacs/pyxl-mode.el new file mode 100644 --- /dev/null +++ b/lib/pyxl/emacs/pyxl-mode.el @@ -0,0 +1,132 @@ +;;; pyxl-mode.el --- major mode for editing pyxl enabled Python +;;; +;;; @author Akhil Wable +;;; +;;; To install, drop this anywhere on your load path, and add the following to +;;; your ~/.emacs file (GNU Emacs) or ~/.xemacs/init.el file (XEmacs): +;;; +;;; (autoload 'pyxl-mode "pyxl-mode" "Major mode for editing pyxl" t) +;;; (setq auto-mode-alist +;;; (cons '("\\.py\\'" . pyxl-mode) auto-mode-alist)) +;;; + +(require 'cl) +(require 'python) + +(defcustom pyxl-mode-hook nil + "list of functions to be executed on entry to pyxl-mode." + :type 'hook + :group 'python) + +(defun pyxl-context-p () + "Does the range include some HTML?" + (let ((start-rexp "([ \n\t]*<") + (end-rexp ">[ \n\t]*)")) + (let ((backward-start (save-excursion (re-search-backward start-rexp nil t))) + (backward-end (save-excursion (re-search-backward end-rexp nil t)))) + (if (and backward-start + (or (not backward-end) (< backward-end backward-start))) + backward-start + nil)))) + +(defun pyxl-back-to-indentation () + (let ((first-non-indent + (save-excursion + (back-to-indentation) + (point)))) + (if (< (point) first-non-indent) + (back-to-indentation)))) + +(defun pyx-indent-line-helper () + "Indent a line containing html." + ;; nesting regex matches either an opening tag OR a closing tag + (let ((nesting-regex "\\(<[:a-zA-Z][:a-zA-Z0-9_]*\\)\\|\\(\\)") + (indent-from (line-beginning-position)) + (depth 1)) + (save-excursion + (re-search-backward "([ \n\t]*<" nil t) + (let ((starting-indent (current-indentation))) + (while (and (< (point) indent-from) + (re-search-forward nesting-regex indent-from t)) + (if (match-string 1) (incf depth)) + (if (match-string 2) (decf depth))) + (goto-char indent-from) + (indent-line-to + (+ starting-indent + (* 4 depth) + (if (looking-at "[ \t]*\\(?:\\)") -4 0))))) + (pyxl-back-to-indentation))) + +(defun pyxl-indent-line () + "Modify indent for a line of html." + (interactive) + (save-excursion + (if (pyxl-context-p) + ;; If a line is inside html, use the custom indent function + (pyx-indent-line-helper) + ;; Fall back to regular python indentation for no html + (python-indent-line))) + + (pyxl-back-to-indentation)) + +(defun pyxl-indent-region (start end) + (save-excursion + (goto-char end) + (setq end (point-marker)) + (goto-char start) + (or (bolp) (forward-line 1)) + (while (< (point) end) + (or (and (bolp) (eolp)) + (pyxl-indent-line)) + (forward-line 1)) + (move-marker end nil))) + +(defcustom pyxl-default-face 'default + "Default face in pyxl-mode buffers." + :type 'face + :group 'pyxl-mode) + +(defconst pyxl-font-lock-keywords + (append + (list + ;; tags + '("\\(]*-->" (0 font-lock-comment-face)) + + ;; XML entities + '("&\\w+;" . font-lock-constant-face) + ) + python-font-lock-keywords) + "Font Lock for pyxl mode.") + +;;;###autoload +(define-derived-mode pyxl-mode python-mode "pyxl" + "Major mode for editing Python code with pyxl." + + ;; Adapted from python-mode.el + (set (make-local-variable 'font-lock-defaults) + '(pyxl-font-lock-keywords + nil + nil + nil + nil + '(font-lock-syntactic-keywords . python-font-lock-syntactic-keywords) + ;; This probably isn't worth it. + ;; (font-lock-syntactic-face-function + ;; . python-font-lock-syntactic-face-function) + )) + + (setq indent-line-function 'pyxl-indent-line) + (setq indent-region-function 'pyxl-indent-region) + (run-hooks 'pyxl-mode-hook)) + +(provide 'pyxl-mode) + +;; In python-mode.el RET is bound to newline-and-indent, which indents the next line if necessary. +;; In python.el which we're extending, this is bound to C-j instead. +;; This binds RET to newline-and-indent +(add-hook + 'python-mode-hook + '(lambda () (define-key python-mode-map "\C-m" 'newline-and-indent))) diff --git a/lib/pyxl/finish_install.py b/lib/pyxl/finish_install.py new file mode 100644 --- /dev/null +++ b/lib/pyxl/finish_install.py @@ -0,0 +1,5 @@ +import shutil +from distutils.sysconfig import get_python_lib + +python_lib = get_python_lib() +shutil.copy('pyxl.pth', python_lib) diff --git a/lib/pyxl/pyxl.pth b/lib/pyxl/pyxl.pth new file mode 100644 --- /dev/null +++ b/lib/pyxl/pyxl.pth @@ -0,0 +1,1 @@ +import pyxl.codec.register diff --git a/lib/pyxl/pyxl/__init__.py b/lib/pyxl/pyxl/__init__.py new file mode 100755 --- /dev/null +++ b/lib/pyxl/pyxl/__init__.py @@ -0,0 +1,1 @@ +#!/usr/bin/env python diff --git a/lib/pyxl/pyxl/base.py b/lib/pyxl/pyxl/base.py new file mode 100755 --- /dev/null +++ b/lib/pyxl/pyxl/base.py @@ -0,0 +1,245 @@ +#!/usr/bin/env python + +# We want a way to generate non-colliding 'pyxl' ids for elements, so we're +# using a non-cryptographically secure random number generator. We want it to be +# insecure because these aren't being used for anything cryptographic and it's +# much faster (2x). We're also not using NumPy (which is even faster) because +# it's a difficult dependency to fulfill purely to generate random numbers. +import random +import sys + +from pyxl.utils import escape + +class PyxlException(Exception): + pass + +class x_base_metaclass(type): + def __init__(self, name, parents, attrs): + super(x_base_metaclass, self).__init__(name, parents, attrs) + x_base_parents = [parent for parent in parents if hasattr(parent, '__attrs__')] + parent_attrs = x_base_parents[0].__attrs__ if len(x_base_parents) else {} + self_attrs = self.__dict__.get('__attrs__', {}) + + # Dont allow '_' in attr names + for attr_name in self_attrs: + assert '_' not in attr_name, ( + "%s: '_' not allowed in attr names, use '-' instead" % attr_name) + + combined_attrs = dict(parent_attrs) + combined_attrs.update(self_attrs) + setattr(self, '__attrs__', combined_attrs) + setattr(self, '__tag__', name[2:]) + +class x_base(object): + + __metaclass__ = x_base_metaclass + __attrs__ = { + # HTML attributes + 'accesskey': unicode, + 'class': unicode, + 'dir': unicode, + 'id': unicode, + 'lang': unicode, + 'maxlength': unicode, + 'role': unicode, + 'style': unicode, + 'tabindex': int, + 'title': unicode, + 'xml:lang': unicode, + + # Microdata HTML attributes + 'itemtype': unicode, + 'itemscope': unicode, + 'itemprop': unicode, + 'itemid': unicode, + 'itemref': unicode, + + # JS attributes + 'onabort': unicode, + 'onblur': unicode, + 'onchange': unicode, + 'onclick': unicode, + 'ondblclick': unicode, + 'onerror': unicode, + 'onfocus': unicode, + 'onkeydown': unicode, + 'onkeypress': unicode, + 'onkeyup': unicode, + 'onload': unicode, + 'onmousedown': unicode, + 'onmouseenter': unicode, + 'onmouseleave': unicode, + 'onmousemove': unicode, + 'onmouseout': unicode, + 'onmouseover': unicode, + 'onmouseup': unicode, + 'onreset': unicode, + 'onresize': unicode, + 'onselect': unicode, + 'onsubmit': unicode, + 'onunload': unicode, + } + + def __init__(self, **kwargs): + self.__attributes__ = {} + self.__children__ = [] + + for name, value in kwargs.iteritems(): + self.set_attr(x_base._fix_attribute_name(name), value) + + def __call__(self, *children): + self.append_children(children) + return self + + def get_id(self): + eid = self.attr('id') + if not eid: + eid = 'pyxl%d' % random.randint(0, sys.maxint) + self.set_attr('id', eid) + return eid + + def children(self, selector=None, exclude=False): + if not selector: + return self.__children__ + + # filter by class + if selector[0] == '.': + select = lambda x: selector[1:] in x.get_class() + + # filter by id + elif selector[0] == '#': + select = lambda x: selector[1:] == x.get_id() + + # filter by tag name + else: + select = lambda x: x.__class__.__name__ == ('x_%s' % selector) + + if exclude: + func = lambda x: not select(x) + else: + func = select + + return filter(func, self.__children__) + + def append(self, child): + if type(child) in (list, tuple) or hasattr(child, '__iter__'): + self.__children__.extend(c for c in child if c is not None and c is not False) + elif child is not None and child is not False: + self.__children__.append(child) + + def prepend(self, child): + if child is not None and child is not False: + self.__children__.insert(0, child) + + def __getattr__(self, name): + return self.attr(name.replace('_', '-')) + + def attr(self, name, default=None): + # this check is fairly expensive (~8% of cost) + if not self.allows_attribute(name): + raise PyxlException('<%s> has no attr named "%s"' % (self.__tag__, name)) + + value = self.__attributes__.get(name) + + if value is not None: + return value + + attr_type = self.__attrs__.get(name, unicode) + if type(attr_type) == list: + if not attr_type: + raise PyxlException('Invalid attribute definition') + + if None in attr_type[1:]: + raise PyxlException('None must be the first, default value') + + return attr_type[0] + + return default + + def transfer_attributes(self, element): + for name, value in self.__attributes__.iteritems(): + if element.allows_attribute(name) and element.attr(name) is None: + element.set_attr(name, value) + + def set_attr(self, name, value): + # this check is fairly expensive (~8% of cost) + if not self.allows_attribute(name): + raise PyxlException('<%s> has no attr named "%s"' % (self.__tag__, name)) + + if value is not None: + attr_type = self.__attrs__.get(name, unicode) + + if type(attr_type) == list: + # support for enum values in pyxl attributes + values_enum = attr_type + assert values_enum, 'Invalid attribute definition' + + if value not in values_enum: + msg = '%s: %s: incorrect value "%s" for "%s". Expecting enum value %s' % ( + self.__tag__, self.__class__.__name__, value, name, values_enum) + raise PyxlException(msg) + + else: + try: + # Validate type of attr and cast to correct type if possible + value = value if isinstance(value, attr_type) else attr_type(value) + except Exception: + exc_type, exc_obj, exc_tb = sys.exc_info() + msg = '%s: %s: incorrect type for "%s". expected %s, got %s' % ( + self.__tag__, self.__class__.__name__, name, attr_type, type(value)) + exception = PyxlException(msg) + raise exception, None, exc_tb + + self.__attributes__[name] = value + + elif name in self.__attributes__: + del self.__attributes__[name] + + def get_class(self): + return self.attr('class', '') + + def add_class(self, xclass): + if not xclass: return + current_class = self.attr('class') + if current_class: current_class += ' ' + xclass + else: current_class = xclass + self.set_attr('class', current_class) + + def append_children(self, children): + for child in children: + self.append(child) + + def attributes(self): + return self.__attributes__ + + def set_attributes(self, attrs_dict): + for name, value in attrs_dict.iteritems(): + self.set_attr(name, value) + + def allows_attribute(self, name): + return (name in self.__attrs__ or name.startswith('data-') or name.startswith('aria-')) + + def to_string(self): + l = [] + self._to_list(l) + return u''.join(l) + + def _to_list(self, l): + raise NotImplementedError() + + def __str__(self): + return self.to_string() + + def __unicode__(self): + return self.to_string() + + @staticmethod + def _render_child_to_list(child, l): + if isinstance(child, x_base): child._to_list(l) + elif child is not None: l.append(escape(child)) + + @staticmethod + def _fix_attribute_name(name): + if name == 'xclass': return 'class' + if name == 'xfor': return 'for' + return name.replace('_', '-').replace('COLON', ':') diff --git a/lib/pyxl/pyxl/browser_hacks.py b/lib/pyxl/pyxl/browser_hacks.py new file mode 100644 --- /dev/null +++ b/lib/pyxl/pyxl/browser_hacks.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from pyxl.base import x_base +from pyxl.utils import escape + +class x_cond_comment(x_base): + __attrs__ = { + 'cond': unicode, + } + + def _to_list(self, l): + # allow '&', escape everything else from cond + cond = self.__attributes__.get('cond', '') + cond = '&'.join(map(escape, cond.split('&'))) + + l.extend((u'') + +class x_cond_noncomment(x_base): + ''' This is a conditional comment where browsers which don't support conditional comments + will parse the children by default. ''' + __attrs__ = { + 'cond': unicode, + } + + def _to_list(self, l): + # allow '&', escape everything else from cond + cond = self.__attributes__.get('cond', '') + cond = '&'.join(map(escape, cond.split('&'))) + + l.extend((u'')) + + for child in self.__children__: + x_base._render_child_to_list(child, l) + + l.append(u'') + diff --git a/lib/pyxl/pyxl/codec/__init__.py b/lib/pyxl/pyxl/codec/__init__.py new file mode 100755 --- /dev/null +++ b/lib/pyxl/pyxl/codec/__init__.py @@ -0,0 +1,1 @@ +#!/usr/bin/env python diff --git a/lib/pyxl/pyxl/codec/html_tokenizer.py b/lib/pyxl/pyxl/codec/html_tokenizer.py new file mode 100644 --- /dev/null +++ b/lib/pyxl/pyxl/codec/html_tokenizer.py @@ -0,0 +1,416 @@ +""" +A naive but strict HTML tokenizer. Based directly on +http://www.w3.org/TR/2011/WD-html5-20110525/tokenization.html + +In the ATTRIBUTE_VALUE and BEFORE_ATTRIBUTE_VALUE states, python tokens are accepted. +""" + +import sys +from collections import OrderedDict + +class State(object): + DATA = 1 + # unused states: charrefs, RCDATA, script, RAWTEXT, PLAINTEXT + TAG_OPEN = 7 + END_TAG_OPEN = 8 + TAG_NAME = 9 + # unused states: RCDATA, RAWTEXT, script + BEFORE_ATTRIBUTE_NAME = 34 + ATTRIBUTE_NAME = 35 + AFTER_ATTRIBUTE_NAME = 36 + BEFORE_ATTRIBUTE_VALUE = 37 + ATTRIBUTE_VALUE_DOUBLE_QUOTED = 38 + ATTRIBUTE_VALUE_SINGLE_QUOTED = 39 + ATTRIBUTE_VALUE_UNQUOTED = 40 + # unused state: CHARREF_IN_ATTRIBUTE_VALUE = 41 + AFTER_ATTRIBUTE_VALUE = 42 + SELF_CLOSING_START_TAG = 43 + # unused state: BOGUS_COMMENT_STATE = 44 + MARKUP_DECLARATION_OPEN = 45 + COMMENT_START = 46 + COMMENT_START_DASH = 47 + COMMENT = 48 + COMMENT_END_DASH = 49 + COMMENT_END = 50 + # unused state: COMMENT_END_BANG = 51 + DOCTYPE = 52 + DOCTYPE_CONTENTS = 53 # Gross oversimplification. Not to spec. + # unused states: doctypes + CDATA_SECTION = 68 + + @classmethod + def state_name(cls, state_val): + for k, v in cls.__dict__.iteritems(): + if v == state_val: + return k + assert False, "impossible state value %r!" % state_val + +class Tag(object): + def __init__(self): + self.tag_name = None + self.attrs = OrderedDict() + self.endtag = False + self.startendtag = False + +class ParseError(Exception): + pass + +class BadCharError(Exception): + def __init__(self, state, char): + super(BadCharError, self).__init__("unexpected character %r in state %r" % + (char, State.state_name(state))) + +class Unimplemented(Exception): + pass + +class HTMLTokenizer(object): + + def __init__(self): + self.state = State.DATA + + # attribute_value is a list, where each element is either a string or a list of python + # tokens. + + self.data = "" + self.tag = None + self.tag_name = None + self.attribute_name = None + self.attribute_value = None + self.markup_declaration_buffer = None + + def handle_data(self, data): + assert False, "subclass should override" + + def handle_starttag(self, tag_name, attrs): + assert False, "subclass should override" + + def handle_startendtag(self, tag_name, attrs): + assert False, "subclass should override" + + def handle_endtag(self, tag_name): + assert False, "subclass should override" + + def handle_comment(self, tag_name): + assert False, "subclass should override" + + def handle_doctype(self, data): + assert False, "subclass should override" + + def handle_cdata(self, tag_name): + assert False, "subclass should override" + + def emit_data(self): + self.handle_data(self.data) + self.data = "" + + def emit_tag(self): + if self.tag.startendtag and self.tag.endtag: + raise ParseError("both startendtag and endtag!?") + if self.tag.startendtag: + self.handle_startendtag(self.tag.tag_name, self.tag.attrs) + elif self.tag.endtag: + self.handle_endtag(self.tag.tag_name) + else: + self.handle_starttag(self.tag.tag_name, self.tag.attrs) + + def emit_comment(self): + self.handle_comment(self.data) + self.data = "" + + def emit_doctype(self): + self.handle_doctype(self.data) + self.data = "" + + def emit_cdata(self): + self.handle_cdata(self.data) + self.data = "" + + def got_attribute(self): + if self.attribute_name in self.tag.attrs: + raise ParseError("repeat attribute name %r" % self.attribute_name) + self.tag.attrs[self.attribute_name] = self.attribute_value + self.attribute_name = None + self.attribute_value = None + + def add_data_char(self, build, c): + """ For adding a new character to e.g. an attribute value """ + if len(build) and type(build[-1]) == str: + build[-1] += c + else: + build.append(c) + + def feed(self, c): + if self.state == State.DATA: + if c == '<': + self.emit_data() + self.state = State.TAG_OPEN + # Pass through; it's the browser's problem to understand these. + #elif c == '&': + # raise Unimplemented + else: + self.data += c + + elif self.state == State.TAG_OPEN: + self.tag = Tag() + if c == '!': + self.markup_declaration_buffer = "" + self.state = State.MARKUP_DECLARATION_OPEN + elif c == '/': + self.state = State.END_TAG_OPEN + elif c.isalpha(): + self.tag.tag_name = c + self.state = State.TAG_NAME + else: + raise BadCharError(self.state, c) + + elif self.state == State.END_TAG_OPEN: + self.tag.endtag = True + if c.isalpha(): + self.tag.tag_name = c + self.state = State.TAG_NAME + else: + raise BadCharError(self.state, c) + + elif self.state == State.TAG_NAME: + if c in '\t\n\f ': + self.state = State.BEFORE_ATTRIBUTE_NAME + elif c == '/': + self.state = State.SELF_CLOSING_START_TAG + elif c == '>': + self.emit_tag() + self.state = State.DATA + else: + self.tag.tag_name += c + + elif self.state == State.BEFORE_ATTRIBUTE_NAME: + if c in '\t\n\f ': + pass + elif c == '/': + self.state = State.SELF_CLOSING_START_TAG + elif c == '>': + self.emit_tag() + self.state = State.DATA + elif c in "\"'<=": + raise BadCharError(self.state, c) + else: + self.attribute_name = c.lower() + self.state = State.ATTRIBUTE_NAME + + elif self.state == State.ATTRIBUTE_NAME: + if c in '\t\n\f ': + self.state = State.AFTER_ATTRIBUTE_NAME + elif c == '/': + self.got_attribute() + self.state = State.SELF_CLOSING_START_TAG + elif c == '=': + self.state = State.BEFORE_ATTRIBUTE_VALUE + elif c == '>': + self.emit_tag() + self.state = State.DATA + elif c in "\"'<": + raise BadCharError(self.state, c) + else: + self.attribute_name += c.lower() + + elif self.state == State.AFTER_ATTRIBUTE_NAME: + if c in '\t\n\f ': + pass + elif c == '/': + self.got_attribute() + self.state = State.SELF_CLOSING_START_TAG + elif c == '=': + self.state = State.BEFORE_ATTRIBUTE_VALUE + elif c == '>': + self.got_attribute() + self.emit_tag() + self.state = State.DATA + elif c in "\"'<": + raise BadCharError(self.state, c) + + elif self.state == State.BEFORE_ATTRIBUTE_VALUE: + if c in '\t\n\f ': + pass + elif c == '"': + self.attribute_value = [] + self.state = State.ATTRIBUTE_VALUE_DOUBLE_QUOTED + elif c == '&': + self.attribute_value = [] + self.state = State.ATTRIBUTE_VALUE_UNQUOTED + self.feed(c) # rehandle c + elif c == "'": + self.attribute_value = [] + self.state = State.ATTRIBUTE_VALUE_SINGLE_QUOTED + elif c in '><=`': + raise BadCharError(self.state, c) + else: + self.attribute_value = [c] + self.state = State.ATTRIBUTE_VALUE_UNQUOTED + + elif self.state == State.ATTRIBUTE_VALUE_DOUBLE_QUOTED: + if c == '"': + self.state = State.AFTER_ATTRIBUTE_VALUE + # Pass through; it's the browser's problem to understand these. + #elif c == '&': + # raise Unimplemented + else: + self.add_data_char(self.attribute_value, c) + + elif self.state == State.ATTRIBUTE_VALUE_SINGLE_QUOTED: + if c == "'": + self.state = State.AFTER_ATTRIBUTE_VALUE + # Pass through; it's the browser's problem to understand these. + #elif c == '&': + # raise Unimplemented + else: + self.add_data_char(self.attribute_value, c) + + elif self.state == State.ATTRIBUTE_VALUE_UNQUOTED: + if c in '\t\n\f ': + self.got_attribute() + self.state = State.BEFORE_ATTRIBUTE_NAME + elif c == '>': + self.got_attribute() + self.emit_tag() + self.state = State.DATA + elif c in "\"'<=`": + raise BadCharError(self.state, c) + # Pass through; it's the browser's problem to understand these. + #elif c == '&': + # raise Unimplemented + else: + self.add_data_char(self.attribute_value, c) + + elif self.state == State.AFTER_ATTRIBUTE_VALUE: + self.got_attribute() + if c in '\t\n\f ': + self.state = State.BEFORE_ATTRIBUTE_NAME + elif c == '/': + self.state = State.SELF_CLOSING_START_TAG + elif c == '>': + self.emit_tag() + self.state = State.DATA + else: + raise BadCharError(self.state, c) + + elif self.state == State.SELF_CLOSING_START_TAG: + self.tag.startendtag = True + if c == '>': + self.emit_tag() + self.state = State.DATA + else: + raise BadCharError(self.state, c) + + elif self.state == State.MARKUP_DECLARATION_OPEN: + self.markup_declaration_buffer += c + if self.markup_declaration_buffer == "--": + self.data = "" + self.state = State.COMMENT_START + elif self.markup_declaration_buffer.lower() == "DOCTYPE".lower(): + self.state = State.DOCTYPE + elif self.markup_declaration_buffer == "[CDATA[": + self.data = "" + self.cdata_buffer = "" + self.state = State.CDATA_SECTION + elif not ("--".startswith(self.markup_declaration_buffer) or + "DOCTYPE".lower().startswith(self.markup_declaration_buffer.lower()) or + "[CDATA[".startswith(self.markup_declaration_buffer)): + raise BadCharError(self.state, c) + + elif self.state == State.COMMENT_START: + if c == "-": + self.state = State.COMMENT_START_DASH + elif c == ">": + raise BadCharError(self.state, c) + else: + self.data += c + self.state = State.COMMENT + + elif self.state == State.COMMENT_START_DASH: + if c == "-": + self.state = State.COMMENT_END + elif c == ">": + raise BadCharError(self.state, c) + else: + self.data += "-" + c + self.state = State.COMMENT + + elif self.state == State.COMMENT: + if c == "-": + self.state = State.COMMENT_END_DASH + else: + self.data += c + + elif self.state == State.COMMENT_END_DASH: + if c == "-": + self.state = State.COMMENT_END + else: + self.data += "-" + c + self.state = State.COMMENT + + elif self.state == State.COMMENT_END: + if c == ">": + self.emit_comment() + self.state = State.DATA + else: + raise BadCharError(self.state, c) + + elif self.state == State.DOCTYPE: + if c in "\t\n\f ": + self.data = "" + self.state = State.DOCTYPE_CONTENTS + else: + raise BadCharError(self.state, c) + + elif self.state == State.DOCTYPE_CONTENTS: + if c == ">": + self.emit_doctype() + self.state = State.DATA + else: + self.data += c + + elif self.state == State.CDATA_SECTION: + self.cdata_buffer += c + if self.cdata_buffer == "]]>": + self.emit_cdata() + self.state = State.DATA + else: + while self.cdata_buffer and not "]]>".startswith(self.cdata_buffer): + self.data += self.cdata_buffer[0] + self.cdata_buffer = self.cdata_buffer[1:] + + else: + assert False, "bad state! %r" % self.state + + def feed_python(self, tokens): + if self.state == State.BEFORE_ATTRIBUTE_VALUE: + self.attribute_value = [tokens] + self.state = State.ATTRIBUTE_VALUE_UNQUOTED + elif self.state in [State.ATTRIBUTE_VALUE_DOUBLE_QUOTED, + State.ATTRIBUTE_VALUE_SINGLE_QUOTED, + State.ATTRIBUTE_VALUE_UNQUOTED]: + self.attribute_value.append(tokens) + else: + raise ParseError("python not allow in state %r" % State.state_name(self.state)) + +class HTMLTokenDumper(HTMLTokenizer): + def handle_data(self, data): + print "DATA %r" % data + + def handle_starttag(self, tag_name, attrs): + print "STARTTAG %r %r" % (tag_name, attrs) + + def handle_startendtag(self, tag_name, attrs): + print "STARTENDTAG %r %r" % (tag_name, attrs) + + def handle_endtag(self, tag_name): + print "ENDTAG %r" % tag_name + +def main(filename): + dumper = HTMLTokenDumper() + with open(filename) as f: + for line in f: + for c in line: + dumper.feed(c) + +if __name__ == "__main__": + main(*sys.argv[1:]) diff --git a/lib/pyxl/pyxl/codec/parser.py b/lib/pyxl/pyxl/codec/parser.py new file mode 100755 --- /dev/null +++ b/lib/pyxl/pyxl/codec/parser.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python + +import tokenize +from pyxl import html +from html_tokenizer import ( + HTMLTokenizer, + ParseError as TokenizerParseError, + State, +) +from pytokenize import Untokenizer + +class ParseError(Exception): + def __init__(self, message, pos=None): + if pos is not None: + super(ParseError, self).__init__("%s at line %d char %d" % ((message,) + pos)) + else: + super(ParseError, self).__init__(message) + +class PyxlParser(HTMLTokenizer): + def __init__(self, row, col): + super(PyxlParser, self).__init__() + self.start = self.end = (row, col) + self.output = [] + self.open_tags = [] + self.remainder = None + self.next_thing_is_python = False + self.last_thing_was_python = False + self.last_thing_was_close_if_tag = False + + def feed(self, token): + ttype, tvalue, tstart, tend, tline = token + + assert tstart[0] >= self.end[0], "row went backwards" + if tstart[0] > self.end[0]: + self.output.append("\n" * (tstart[0] - self.end[0])) + + # interpret jumps on the same line as a single space + elif tstart[1] > self.end[1]: + super(PyxlParser, self).feed(" ") + + self.end = tstart + + if ttype != tokenize.INDENT: + while tvalue and not self.done(): + c, tvalue = tvalue[0], tvalue[1:] + if c == "\n": + self.end = (self.end[0]+1, 0) + else: + self.end = (self.end[0], self.end[1]+1) + try: + super(PyxlParser, self).feed(c) + except TokenizerParseError: + raise ParseError("HTML Parsing error", self.end) + if self.done(): + self.remainder = (ttype, tvalue, self.end, tend, tline) + else: + self.end = tend + + def feed_python(self, tokens): + ttype, tvalue, tstart, tend, tline = tokens[0] + assert tstart[0] >= self.end[0], "row went backwards" + if tstart[0] > self.end[0]: + self.output.append("\n" * (tstart[0] - self.end[0])) + ttype, tvalue, tstart, tend, tline = tokens[-1] + self.end = tend + + if self.state in [State.DATA, State.CDATA_SECTION]: + self.next_thing_is_python = True + self.emit_data() + self.output.append("%s, " % Untokenizer().untokenize(tokens)) + self.next_thing_is_python = False + self.last_thing_was_python = True + elif self.state in [State.BEFORE_ATTRIBUTE_VALUE, + State.ATTRIBUTE_VALUE_DOUBLE_QUOTED, + State.ATTRIBUTE_VALUE_SINGLE_QUOTED, + State.ATTRIBUTE_VALUE_UNQUOTED]: + super(PyxlParser, self).feed_python(tokens) + + def feed_position_only(self, token): + """update with any whitespace we might have missed, and advance position to after the + token""" + ttype, tvalue, tstart, tend, tline = token + self.feed((ttype, '', tstart, tstart, tline)) + self.end = tend + + def python_comment_allowed(self): + """Returns true if we're in a state where a # starts a comment. + + + # comment in data + Link text + + """ + return self.state in (State.DATA, State.TAG_NAME, + State.BEFORE_ATTRIBUTE_NAME, State.AFTER_ATTRIBUTE_NAME, + State.BEFORE_ATTRIBUTE_VALUE, State.AFTER_ATTRIBUTE_VALUE, + State.COMMENT, State.DOCTYPE_CONTENTS, State.CDATA_SECTION) + + def python_mode_allowed(self): + """Returns true if we're in a state where a { starts python mode. + + + """ + return self.state not in (State.COMMENT,) + + def feed_comment(self, token): + ttype, tvalue, tstart, tend, tline = token + self.feed((ttype, '', tstart, tstart, tline)) + self.output.append(tvalue) + self.end = tend + + def get_remainder(self): + return self.remainder + + def done(self): + return len(self.open_tags) == 0 and self.state == State.DATA and self.output + + def get_token(self): + return (tokenize.STRING, ''.join(self.output), self.start, self.end, '') + + @staticmethod + def safe_attr_name(name): + if name == "class": + return "xclass" + if name == "for": + return "xfor" + return name.replace('-', '_').replace(':', 'COLON') + + def _handle_attr_value(self, attr_value): + def format_parts(): + prev_was_python = False + for i, part in enumerate(attr_value): + if type(part) == list: + yield part + prev_was_python = True + else: + next_is_python = bool(i+1 < len(attr_value) and type(attr_value[i+1]) == list) + part = self._normalize_data_whitespace(part, prev_was_python, next_is_python) + if part: + yield part + prev_was_python = False + + attr_value = list(format_parts()) + if len(attr_value) == 1: + part = attr_value[0] + if type(part) == list: + self.output.append(Untokenizer().untokenize(part)) + else: + self.output.append(repr(part)) + else: + self.output.append('u"".join((') + for part in attr_value: + if type(part) == list: + self.output.append('unicode(') + self.output.append(Untokenizer().untokenize(part)) + self.output.append(')') + else: + self.output.append(repr(part)) + self.output.append(', ') + self.output.append('))') + + @staticmethod + def _normalize_data_whitespace(data, prev_was_py, next_is_py): + if not data: + return '' + if '\n' in data and not data.strip(): + if prev_was_py and next_is_py: + return ' ' + else: + return '' + if prev_was_py and data.startswith('\n'): + data = " " + data.lstrip('\n') + if next_is_py and data.endswith('\n'): + data = data.rstrip('\n') + " " + data = data.strip('\n') + data = data.replace('\r', ' ') + data = data.replace('\n', ' ') + return data + + def handle_starttag(self, tag, attrs, call=True): + self.open_tags.append({'tag':tag, 'row': self.end[0]}) + if tag == 'if': + if len(attrs) != 1: + raise ParseError("if tag only takes one attr called 'cond'", self.end) + if 'cond' not in attrs: + raise ParseError("if tag must contain the 'cond' attr", self.end) + + self.output.append('html._push_condition(bool(') + self._handle_attr_value(attrs['cond']) + self.output.append(')) and html.x_frag()(') + self.last_thing_was_python = False + self.last_thing_was_close_if_tag = False + return + elif tag == 'else': + if len(attrs) != 0: + raise ParseError("else tag takes no attrs", self.end) + if not self.last_thing_was_close_if_tag: + raise ParseError(" tag must come right after ", self.end) + + self.output.append('(not html._last_if_condition) and html.x_frag()(') + self.last_thing_was_python = False + self.last_thing_was_close_if_tag = False + return + + module, dot, identifier = tag.rpartition('.') + identifier = 'x_%s' % identifier + x_tag = module + dot + identifier + + if hasattr(html, x_tag): + self.output.append('html.') + self.output.append('%s(' % x_tag) + + first_attr = True + for attr_name, attr_value in attrs.iteritems(): + if first_attr: first_attr = False + else: self.output.append(', ') + + self.output.append(self.safe_attr_name(attr_name)) + self.output.append('=') + self._handle_attr_value(attr_value) + + self.output.append(')') + if call: + # start call to __call__ + self.output.append('(') + self.last_thing_was_python = False + self.last_thing_was_close_if_tag = False + + def handle_endtag(self, tag_name, call=True): + if call: + # finish call to __call__ + self.output.append(")") + + assert self.open_tags, "got but tag stack empty; parsing should be over!" % tag_name + + open_tag = self.open_tags.pop() + if open_tag['tag'] != tag_name: + raise ParseError("<%s> on line %d closed by on line %d" % + (open_tag['tag'], open_tag['row'], tag_name, self.end[0])) + + if open_tag['tag'] == 'if': + self.output.append(',html._leave_if()') + self.last_thing_was_close_if_tag = True + else: + self.last_thing_was_close_if_tag = False + + if len(self.open_tags): + self.output.append(",") + self.last_thing_was_python = False + + def handle_startendtag(self, tag_name, attrs): + self.handle_starttag(tag_name, attrs, call=False) + self.handle_endtag(tag_name, call=False) + + def handle_data(self, data): + data = self._normalize_data_whitespace( + data, self.last_thing_was_python, self.next_thing_is_python) + if not data: + return + + # XXX XXX mimics old pyxl, but this is gross and likely wrong. I'm pretty sure we actually + # want %r instead of this crazy quote substitution and u"%s". + data = data.replace('"', '\\"') + self.output.append('html.rawhtml(u"%s"), ' % data) + + self.last_thing_was_python = False + self.last_thing_was_close_if_tag = False + + def handle_comment(self, data): + self.handle_startendtag("html_comment", {"comment": [data.strip()]}) + self.last_thing_was_python = False + self.last_thing_was_close_if_tag = False + + def handle_doctype(self, data): + self.handle_startendtag("html_decl", {"decl": ['DOCTYPE ' + data]}) + self.last_thing_was_python = False + self.last_thing_was_close_if_tag = False + + def handle_cdata(self, data): + self.handle_startendtag("html_marked_decl", {"decl": ['CDATA[' + data]}) + self.last_thing_was_python = False + self.last_thing_was_close_if_tag = False diff --git a/lib/pyxl/pyxl/codec/pytokenize.py b/lib/pyxl/pyxl/codec/pytokenize.py new file mode 100644 --- /dev/null +++ b/lib/pyxl/pyxl/codec/pytokenize.py @@ -0,0 +1,468 @@ +"""Tokenization help for Python programs. + +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens. It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF). It generates +5-tuples with these members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points + tokenize_loop(readline, tokeneater) + tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found. + + +This file was taken from the python 2.7.4 library and modified for use by +the Pyxl decoder. Changes made: + - When it encounters an unexpected EOF, the tokenizer does not raise an + exception, and instead yields an errortoken if appropriate. + - When it encounters an unexpected dedent, the tokenizer does not + raise an exception. + - The Untokenizer class was heavily modified. + + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013 Python Software Foundation; All Rights Reserved" are retained +in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. +""" + +__author__ = 'Ka-Ping Yee ' +__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' + 'Skip Montanaro, Raymond Hettinger') + +import string, re +from token import * + +import token +__all__ = [x for x in dir(token) if not x.startswith("_")] +__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"] +del x +del token + +COMMENT = N_TOKENS +tok_name[COMMENT] = 'COMMENT' +NL = N_TOKENS + 1 +tok_name[NL] = 'NL' +N_TOKENS += 2 + +def group(*choices): return '(' + '|'.join(choices) + ')' +def any(*choices): return group(*choices) + '*' +def maybe(*choices): return group(*choices) + '?' + +Whitespace = r'[ \f\t]*' +Comment = r'#[^\r\n]*' +Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) +Name = r'[a-zA-Z_]\w*' + +Hexnumber = r'0[xX][\da-fA-F]+[lL]?' +Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?' +Binnumber = r'0[bB][01]+[lL]?' +Decnumber = r'[1-9]\d*[lL]?' +Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) +Exponent = r'[eE][-+]?\d+' +Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) +Expfloat = r'\d+' + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') +Number = group(Imagnumber, Floatnumber, Intnumber) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""') +# Single-line ' or " string. +String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", + r"//=?", + r"[+\-*/%&|^=<>]=?", + r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'[:;.,`@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. +ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +tokenprog, pseudoprog, single3prog, double3prog = map( + re.compile, (Token, PseudoToken, Single3, Double3)) +endprogs = {"'": re.compile(Single), '"': re.compile(Double), + "'''": single3prog, '"""': double3prog, + "r'''": single3prog, 'r"""': double3prog, + "u'''": single3prog, 'u"""': double3prog, + "ur'''": single3prog, 'ur"""': double3prog, + "R'''": single3prog, 'R"""': double3prog, + "U'''": single3prog, 'U"""': double3prog, + "uR'''": single3prog, 'uR"""': double3prog, + "Ur'''": single3prog, 'Ur"""': double3prog, + "UR'''": single3prog, 'UR"""': double3prog, + "b'''": single3prog, 'b"""': double3prog, + "br'''": single3prog, 'br"""': double3prog, + "B'''": single3prog, 'B"""': double3prog, + "bR'''": single3prog, 'bR"""': double3prog, + "Br'''": single3prog, 'Br"""': double3prog, + "BR'''": single3prog, 'BR"""': double3prog, + 'r': None, 'R': None, 'u': None, 'U': None, + 'b': None, 'B': None} + +triple_quoted = {} +for t in ("'''", '"""', + "r'''", 'r"""', "R'''", 'R"""', + "u'''", 'u"""', "U'''", 'U"""', + "ur'''", 'ur"""', "Ur'''", 'Ur"""', + "uR'''", 'uR"""', "UR'''", 'UR"""', + "b'''", 'b"""', "B'''", 'B"""', + "br'''", 'br"""', "Br'''", 'Br"""', + "bR'''", 'bR"""', "BR'''", 'BR"""'): + triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', + "r'", 'r"', "R'", 'R"', + "u'", 'u"', "U'", 'U"', + "ur'", 'ur"', "Ur'", 'Ur"', + "uR'", 'uR"', "UR'", 'UR"', + "b'", 'b"', "B'", 'B"', + "br'", 'br"', "Br'", 'Br"', + "bR'", 'bR"', "BR'", 'BR"' ): + single_quoted[t] = t + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + +def printtoken(type, token, srow_scol, erow_ecol, line): # for testing + srow, scol = srow_scol + erow, ecol = erow_ecol + print "%d,%d-%d,%d:\t%s\t%s" % \ + (srow, scol, erow, ecol, tok_name[type], repr(token)) + +def tokenize(readline, tokeneater=printtoken): + """ + The tokenize() function accepts two parameters: one representing the + input stream, and one providing an output mechanism for tokenize(). + + The first parameter, readline, must be a callable object which provides + the same interface as the readline() method of built-in file objects. + Each call to the function should return one line of input as a string. + + The second parameter, tokeneater, must also be a callable object. It is + called once for each token, with five arguments, corresponding to the + tuples generated by generate_tokens(). + """ + try: + tokenize_loop(readline, tokeneater) + except StopTokenizing: + pass + +# backwards compatible interface +def tokenize_loop(readline, tokeneater): + for token_info in generate_tokens(readline): + tokeneater(*token_info) + +class Untokenizer: + + # PYXL MODIFICATION: This entire class. + + def __init__(self, row=None, col=None): + self.tokens = [] + self.prev_row = row + self.prev_col = col + + def add_whitespace(self, start): + row, col = start + assert row >= self.prev_row, "row (%r) should be >= prev_row (%r)" % (row, self.prev_row) + row_offset = row - self.prev_row + if row_offset: + self.tokens.append("\n" * row_offset) + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def feed(self, t): + assert len(t) == 5 + tok_type, token, start, end, line = t + if (self.prev_row is None): + self.prev_row, self.prev_col = start + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + + def finish(self): + return "".join(self.tokens) + + def untokenize(self, iterable): + for t in iterable: + self.feed(t) + return self.finish() + +def untokenize(iterable): + """Transform tokens back into Python source code. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited intput: + # Output text will tokenize the back to the input + t1 = [tok[:2] for tok in generate_tokens(f.readline)] + newcode = untokenize(t1) + readline = iter(newcode.splitlines(1)).next + t2 = [tok[:2] for tok in generate_tokens(readline)] + assert t1 == t2 + """ + ut = Untokenizer() + return ut.untokenize(iterable) + +def generate_tokens(readline): + """ + The generate_tokens() generator requires one argment, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as a string. Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile).next # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and From pypy.commits at gmail.com Wed Jan 20 16:35:30 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 20 Jan 2016 13:35:30 -0800 (PST) Subject: [pypy-commit] benchmarks default: add some other pyston benchmarks Message-ID: <569ffda2.520e1c0a.1e25f.56dd@mx.google.com> Author: fijal Branch: Changeset: r346:947a8b49e3cd Date: 2016-01-20 22:31 +0100 http://bitbucket.org/pypy/benchmarks/changeset/947a8b49e3cd/ Log: add some other pyston benchmarks diff too long, truncating to 2000 out of 513606 lines diff --git a/benchmarks.py b/benchmarks.py --- a/benchmarks.py +++ b/benchmarks.py @@ -66,6 +66,11 @@ 'bm_dulwich_log': {'bm_env': {'PYTHONPATH': relative('lib/dulwich-0.9.1')}}, 'bm_chameleon': {'bm_env': {'PYTHONPATH': relative('lib/chameleon/src')}, 'iteration_scaling': 3}, + 'nqueens': {'iteration_scaling': .1}, + 'sqlalchemy_declarative': {'bm_env': {'PYTHONPATH': relative('lib/sqlalchemy/lib')}, + 'iteration_scaling': 3}, + 'sqlalchemy_imperative': {'bm_env': {'PYTHONPATH': relative('lib/sqlalchemy/lib')}, + 'iteration_scaling': 10}, } for name in ['expand', 'integrate', 'sum', 'str']: @@ -84,7 +89,8 @@ 'raytrace-simple', 'crypto_pyaes', 'bm_mako', 'bm_chameleon', 'json_bench', 'pidigits', 'hexiom2', 'eparse', 'deltablue', 'bm_dulwich_log', 'bm_krakatau', 'bm_mdp', 'pypy_interp', - 'sqlitesynth', 'pyxl_bench']: + 'sqlitesynth', 'pyxl_bench', 'nqueens', 'sqlalchemy_declarative', + 'sqlalchemy_imperative']: _register_new_bm(name, name, globals(), **opts.get(name, {})) for name in ['names', 'iteration', 'tcp', 'pb', ]:#'web']:#, 'accepts']: diff --git a/lib/sqlalchemy/AUTHORS b/lib/sqlalchemy/AUTHORS new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/AUTHORS @@ -0,0 +1,18 @@ +SQLAlchemy was created by Michael Bayer. + +Major contributing authors include: + +- Michael Bayer +- Jason Kirtland +- Gaetan de Menten +- Diana Clarke +- Michael Trier +- Philip Jenvey +- Ants Aasma +- Paul Johnston +- Jonathan Ellis + +For a larger list of SQLAlchemy contributors over time, see: + +http://www.sqlalchemy.org/trac/wiki/Contributors + diff --git a/lib/sqlalchemy/CHANGES b/lib/sqlalchemy/CHANGES new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/CHANGES @@ -0,0 +1,16 @@ +===== +MOVED +===== + +Please see: + + /doc/changelog/index.html + +or + + http://www.sqlalchemy.org/docs/latest/changelog/ + +for an index of all changelogs. + + + diff --git a/lib/sqlalchemy/LICENSE b/lib/sqlalchemy/LICENSE new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/LICENSE @@ -0,0 +1,20 @@ +This is the MIT license: http://www.opensource.org/licenses/mit-license.php + +Copyright (c) 2005-2015 the SQLAlchemy authors and contributors . +SQLAlchemy is a trademark of Michael Bayer. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons +to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE +FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/lib/sqlalchemy/MANIFEST.in b/lib/sqlalchemy/MANIFEST.in new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/MANIFEST.in @@ -0,0 +1,13 @@ +# any kind of "*" pulls in __init__.pyc files, +# so all extensions are explicit. + +recursive-include doc *.html *.css *.txt *.js *.jpg *.png *.py Makefile *.rst *.mako *.sty +recursive-include examples *.py *.xml +recursive-include test *.py *.dat + +# include the c extensions, which otherwise +# don't come in if --with-cextensions isn't specified. +recursive-include lib *.c *.txt + +include README* AUTHORS LICENSE distribute_setup.py sa2to3.py ez_setup.py sqla_nose.py CHANGES* tox.ini +prune doc/build/output diff --git a/lib/sqlalchemy/PKG-INFO b/lib/sqlalchemy/PKG-INFO new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/PKG-INFO @@ -0,0 +1,155 @@ +Metadata-Version: 1.1 +Name: SQLAlchemy +Version: 1.0.11 +Summary: Database Abstraction Library +Home-page: http://www.sqlalchemy.org +Author: Mike Bayer +Author-email: mike_mp at zzzcomputing.com +License: MIT License +Description: SQLAlchemy + ========== + + The Python SQL Toolkit and Object Relational Mapper + + Introduction + ------------- + + SQLAlchemy is the Python SQL toolkit and Object Relational Mapper + that gives application developers the full power and + flexibility of SQL. SQLAlchemy provides a full suite + of well known enterprise-level persistence patterns, + designed for efficient and high-performing database + access, adapted into a simple and Pythonic domain + language. + + Major SQLAlchemy features include: + + * An industrial strength ORM, built + from the core on the identity map, unit of work, + and data mapper patterns. These patterns + allow transparent persistence of objects + using a declarative configuration system. + Domain models + can be constructed and manipulated naturally, + and changes are synchronized with the + current transaction automatically. + * A relationally-oriented query system, exposing + the full range of SQL's capabilities + explicitly, including joins, subqueries, + correlation, and most everything else, + in terms of the object model. + Writing queries with the ORM uses the same + techniques of relational composition you use + when writing SQL. While you can drop into + literal SQL at any time, it's virtually never + needed. + * A comprehensive and flexible system + of eager loading for related collections and objects. + Collections are cached within a session, + and can be loaded on individual access, all + at once using joins, or by query per collection + across the full result set. + * A Core SQL construction system and DBAPI + interaction layer. The SQLAlchemy Core is + separate from the ORM and is a full database + abstraction layer in its own right, and includes + an extensible Python-based SQL expression + language, schema metadata, connection pooling, + type coercion, and custom types. + * All primary and foreign key constraints are + assumed to be composite and natural. Surrogate + integer primary keys are of course still the + norm, but SQLAlchemy never assumes or hardcodes + to this model. + * Database introspection and generation. Database + schemas can be "reflected" in one step into + Python structures representing database metadata; + those same structures can then generate + CREATE statements right back out - all within + the Core, independent of the ORM. + + SQLAlchemy's philosophy: + + * SQL databases behave less and less like object + collections the more size and performance start to + matter; object collections behave less and less like + tables and rows the more abstraction starts to matter. + SQLAlchemy aims to accommodate both of these + principles. + * An ORM doesn't need to hide the "R". A relational + database provides rich, set-based functionality + that should be fully exposed. SQLAlchemy's + ORM provides an open-ended set of patterns + that allow a developer to construct a custom + mediation layer between a domain model and + a relational schema, turning the so-called + "object relational impedance" issue into + a distant memory. + * The developer, in all cases, makes all decisions + regarding the design, structure, and naming conventions + of both the object model as well as the relational + schema. SQLAlchemy only provides the means + to automate the execution of these decisions. + * With SQLAlchemy, there's no such thing as + "the ORM generated a bad query" - you + retain full control over the structure of + queries, including how joins are organized, + how subqueries and correlation is used, what + columns are requested. Everything SQLAlchemy + does is ultimately the result of a developer- + initiated decision. + * Don't use an ORM if the problem doesn't need one. + SQLAlchemy consists of a Core and separate ORM + component. The Core offers a full SQL expression + language that allows Pythonic construction + of SQL constructs that render directly to SQL + strings for a target database, returning + result sets that are essentially enhanced DBAPI + cursors. + * Transactions should be the norm. With SQLAlchemy's + ORM, nothing goes to permanent storage until + commit() is called. SQLAlchemy encourages applications + to create a consistent means of delineating + the start and end of a series of operations. + * Never render a literal value in a SQL statement. + Bound parameters are used to the greatest degree + possible, allowing query optimizers to cache + query plans effectively and making SQL injection + attacks a non-issue. + + Documentation + ------------- + + Latest documentation is at: + + http://www.sqlalchemy.org/docs/ + + Installation / Requirements + --------------------------- + + Full documentation for installation is at + `Installation `_. + + Getting Help / Development / Bug reporting + ------------------------------------------ + + Please refer to the `SQLAlchemy Community Guide `_. + + License + ------- + + SQLAlchemy is distributed under the `MIT license + `_. + + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Database :: Front-Ends +Classifier: Operating System :: OS Independent diff --git a/lib/sqlalchemy/README.dialects.rst b/lib/sqlalchemy/README.dialects.rst new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/README.dialects.rst @@ -0,0 +1,262 @@ +======================== +Developing new Dialects +======================== + +.. note:: + + When studying this file, it's probably a good idea to also + familiarize with the README.unittests.rst file, which discusses + SQLAlchemy's usage and extension of the Nose test runner. + +While SQLAlchemy includes many dialects within the core distribution, the +trend for new dialects should be that they are published as external +projects. SQLAlchemy has since version 0.5 featured a "plugin" system +which allows external dialects to be integrated into SQLAlchemy using +standard setuptools entry points. As of version 0.8, this system has +been enhanced, so that a dialect can also be "plugged in" at runtime. + +On the testing side, SQLAlchemy as of 0.8 also includes a "dialect +compliance suite" that is usable by third party libraries. There is no +longer a strong need for a new dialect to run through SQLAlchemy's full +testing suite, as a large portion of these tests do not have +dialect-sensitive functionality. The "dialect compliance suite" should +be viewed as the primary target for new dialects, and as it continues +to grow and mature it should become a more thorough and efficient system +of testing new dialects. + +As of SQLAlchemy 0.9.4, both nose and pytest are supported for running tests, +and pytest is now preferred. + +Dialect Layout +=============== + +The file structure of a dialect is typically similar to the following:: + + sqlalchemy-/ + setup.py + setup.cfg + run_tests.py + sqlalchemy_/ + __init__.py + base.py + .py + requirements.py + test/ + conftest.py + __init__.py + test_suite.py + test_.py + ... + +An example of this structure can be seen in the Access dialect at +https://bitbucket.org/zzzeek/sqlalchemy-access/. + +Key aspects of this file layout include: + +* setup.py - should specify setuptools entrypoints, allowing the + dialect to be usable from create_engine(), e.g.:: + + entry_points={ + 'sqlalchemy.dialects': [ + 'access = sqlalchemy_access.pyodbc:AccessDialect_pyodbc', + 'access.pyodbc = sqlalchemy_access.pyodbc:AccessDialect_pyodbc', + ] + } + + Above, the two entrypoints ``access`` and ``access.pyodbc`` allow URLs to be + used such as:: + + create_engine("access://user:pw at dsn") + + create_engine("access+pyodbc://user:pw at dsn") + +* setup.cfg - this file contains the traditional contents such as [egg_info], + [pytest] and [nosetests] directives, but also contains new directives that are used + by SQLAlchemy's testing framework. E.g. for Access:: + + [egg_info] + tag_build = dev + + [pytest] + addopts= --tb native -v -r fxX + python_files=test/*test_*.py + + [nosetests] + with-sqla_testing = true + where = test + cover-package = sqlalchemy_access + with-coverage = 1 + cover-erase = 1 + + [sqla_testing] + requirement_cls=sqlalchemy_access.requirements:Requirements + profile_file=.profiles.txt + + [db] + default=access+pyodbc://admin at access_test + sqlite=sqlite:///:memory: + + Above, the ``[sqla_testing]`` section contains configuration used by + SQLAlchemy's test plugin. The ``[pytest]`` and ``[nosetests]`` sections + include directives to help with these runners; in the case of + Nose, the directive ``with-sql_testing = true``, which indicates to Nose that + the SQLAlchemy nose plugin should be used. In the case of pytest, the + test/conftest.py file will bootstrap SQLAlchemy's plugin. + +* test/conftest.py - This script bootstraps SQLAlchemy's pytest plugin + into the pytest runner. This + script can also be used to install your third party dialect into + SQLAlchemy without using the setuptools entrypoint system; this allows + your dialect to be present without any explicit setup.py step needed. + The other portion invokes SQLAlchemy's pytest plugin:: + + from sqlalchemy.dialects import registry + + registry.register("access", "sqlalchemy_access.pyodbc", "AccessDialect_pyodbc") + registry.register("access.pyodbc", "sqlalchemy_access.pyodbc", "AccessDialect_pyodbc") + + from sqlalchemy.testing.plugin.pytestplugin import * + + Where above, the ``registry`` module, introduced in SQLAlchemy 0.8, provides + an in-Python means of installing the dialect entrypoints without the use + of setuptools, using the ``registry.register()`` function in a way that + is similar to the ``entry_points`` directive we placed in our ``setup.py``. + +* run_tests.py - This script is used when running the tests via Nose. + The purpose of the script is to plug in SQLAlchemy's nose plugin into + the Nose environment before the tests run. + + The format of this file is similar to that of conftest.py; first, + the optional but helpful step of registering your third party plugin, + then the other is to import SQLAlchemy's nose runner and invoke it:: + + from sqlalchemy.dialects import registry + + registry.register("access", "sqlalchemy_access.pyodbc", "AccessDialect_pyodbc") + registry.register("access.pyodbc", "sqlalchemy_access.pyodbc", "AccessDialect_pyodbc") + + from sqlalchemy.testing import runner + + # use this in setup.py 'test_suite': + # test_suite="run_tests.setup_py_test" + def setup_py_test(): + runner.setup_py_test() + + if __name__ == '__main__': + runner.main() + + The call to ``runner.main()`` then runs the Nose front end, which installs + SQLAlchemy's testing plugins. Invoking our custom runner looks like the + following:: + + $ python run_tests.py -v + +* requirements.py - The ``requirements.py`` file is where directives + regarding database and dialect capabilities are set up. + SQLAlchemy's tests are often annotated with decorators that mark + tests as "skip" or "fail" for particular backends. Over time, this + system has been refined such that specific database and DBAPI names + are mentioned less and less, in favor of @requires directives which + state a particular capability. The requirement directive is linked + to target dialects using a ``Requirements`` subclass. The custom + ``Requirements`` subclass is specified in the ``requirements.py`` file + and is made available to SQLAlchemy's test runner using the + ``requirement_cls`` directive inside the ``[sqla_testing]`` section. + + For a third-party dialect, the custom ``Requirements`` class can + usually specify a simple yes/no answer for a particular system. For + example, a requirements file that specifies a database that supports + the RETURNING construct but does not support reflection of tables + might look like this:: + + # sqlalchemy_access/requirements.py + + from sqlalchemy.testing.requirements import SuiteRequirements + + from sqlalchemy.testing import exclusions + + class Requirements(SuiteRequirements): + @property + def table_reflection(self): + return exclusions.closed() + + @property + def returning(self): + return exclusions.open() + + The ``SuiteRequirements`` class in + ``sqlalchemy.testing.requirements`` contains a large number of + requirements rules, which attempt to have reasonable defaults. The + tests will report on those requirements found as they are run. + + The requirements system can also be used when running SQLAlchemy's + primary test suite against the external dialect. In this use case, + a ``--dburi`` as well as a ``--requirements`` flag are passed to SQLAlchemy's + main test runner ``./sqla_nose.py`` so that exclusions specific to the + dialect take place:: + + cd /path/to/sqlalchemy + py.test -v \ + --requirements sqlalchemy_access.requirements:Requirements \ + --dburi access+pyodbc://admin at access_test + +* test_suite.py - Finally, the ``test_suite.py`` module represents a + stub test suite, which pulls in the actual SQLAlchemy test suite. + To pull in the suite as a whole, it can be imported in one step:: + + # test/test_suite.py + + from sqlalchemy.testing.suite import * + + That's all that's needed - the ``sqlalchemy.testing.suite`` package + contains an ever expanding series of tests, most of which should be + annotated with specific requirement decorators so that they can be + fully controlled. To specifically modify some of the tests, they can + be imported by name and subclassed:: + + from sqlalchemy.testing.suite import * + + from sqlalchemy.testing.suite import ComponentReflectionTest as _ComponentReflectionTest + + class ComponentReflectionTest(_ComponentReflectionTest): + @classmethod + def define_views(cls, metadata, schema): + # bypass the "define_views" section of the + # fixture + return + +Going Forward +============== + +The third-party dialect can be distributed like any other Python +module on Pypi. Links to prominent dialects can be featured within +SQLAlchemy's own documentation; contact the developers (see AUTHORS) +for help with this. + +While SQLAlchemy includes many dialects built in, it remains to be +seen if the project as a whole might move towards "plugin" model for +all dialects, including all those currently built in. Now that +SQLAlchemy's dialect API is mature and the test suite is not far +behind, it may be that a better maintenance experience can be +delivered by having all dialects separately maintained and released. + +As new versions of SQLAlchemy are released, the test suite and +requirements file will receive new tests and changes. The dialect +maintainer would normally keep track of these changes and make +adjustments as needed. + +Continuous Integration +====================== + +The most ideal scenario for ongoing dialect testing is continuous +integration, that is, an automated test runner that runs in response +to changes not just in the dialect itself but to new pushes to +SQLAlchemy as well. + +The SQLAlchemy project features a Jenkins installation that runs tests +on Amazon EC2 instances. It is possible for third-party dialect +developers to provide the SQLAlchemy project either with AMIs or EC2 +instance keys which feature test environments appropriate to the +dialect - SQLAlchemy's own Jenkins suite can invoke tests on these +environments. Contact the developers for further info. + diff --git a/lib/sqlalchemy/README.rst b/lib/sqlalchemy/README.rst new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/README.rst @@ -0,0 +1,135 @@ +SQLAlchemy +========== + +The Python SQL Toolkit and Object Relational Mapper + +Introduction +------------- + +SQLAlchemy is the Python SQL toolkit and Object Relational Mapper +that gives application developers the full power and +flexibility of SQL. SQLAlchemy provides a full suite +of well known enterprise-level persistence patterns, +designed for efficient and high-performing database +access, adapted into a simple and Pythonic domain +language. + +Major SQLAlchemy features include: + +* An industrial strength ORM, built + from the core on the identity map, unit of work, + and data mapper patterns. These patterns + allow transparent persistence of objects + using a declarative configuration system. + Domain models + can be constructed and manipulated naturally, + and changes are synchronized with the + current transaction automatically. +* A relationally-oriented query system, exposing + the full range of SQL's capabilities + explicitly, including joins, subqueries, + correlation, and most everything else, + in terms of the object model. + Writing queries with the ORM uses the same + techniques of relational composition you use + when writing SQL. While you can drop into + literal SQL at any time, it's virtually never + needed. +* A comprehensive and flexible system + of eager loading for related collections and objects. + Collections are cached within a session, + and can be loaded on individual access, all + at once using joins, or by query per collection + across the full result set. +* A Core SQL construction system and DBAPI + interaction layer. The SQLAlchemy Core is + separate from the ORM and is a full database + abstraction layer in its own right, and includes + an extensible Python-based SQL expression + language, schema metadata, connection pooling, + type coercion, and custom types. +* All primary and foreign key constraints are + assumed to be composite and natural. Surrogate + integer primary keys are of course still the + norm, but SQLAlchemy never assumes or hardcodes + to this model. +* Database introspection and generation. Database + schemas can be "reflected" in one step into + Python structures representing database metadata; + those same structures can then generate + CREATE statements right back out - all within + the Core, independent of the ORM. + +SQLAlchemy's philosophy: + +* SQL databases behave less and less like object + collections the more size and performance start to + matter; object collections behave less and less like + tables and rows the more abstraction starts to matter. + SQLAlchemy aims to accommodate both of these + principles. +* An ORM doesn't need to hide the "R". A relational + database provides rich, set-based functionality + that should be fully exposed. SQLAlchemy's + ORM provides an open-ended set of patterns + that allow a developer to construct a custom + mediation layer between a domain model and + a relational schema, turning the so-called + "object relational impedance" issue into + a distant memory. +* The developer, in all cases, makes all decisions + regarding the design, structure, and naming conventions + of both the object model as well as the relational + schema. SQLAlchemy only provides the means + to automate the execution of these decisions. +* With SQLAlchemy, there's no such thing as + "the ORM generated a bad query" - you + retain full control over the structure of + queries, including how joins are organized, + how subqueries and correlation is used, what + columns are requested. Everything SQLAlchemy + does is ultimately the result of a developer- + initiated decision. +* Don't use an ORM if the problem doesn't need one. + SQLAlchemy consists of a Core and separate ORM + component. The Core offers a full SQL expression + language that allows Pythonic construction + of SQL constructs that render directly to SQL + strings for a target database, returning + result sets that are essentially enhanced DBAPI + cursors. +* Transactions should be the norm. With SQLAlchemy's + ORM, nothing goes to permanent storage until + commit() is called. SQLAlchemy encourages applications + to create a consistent means of delineating + the start and end of a series of operations. +* Never render a literal value in a SQL statement. + Bound parameters are used to the greatest degree + possible, allowing query optimizers to cache + query plans effectively and making SQL injection + attacks a non-issue. + +Documentation +------------- + +Latest documentation is at: + +http://www.sqlalchemy.org/docs/ + +Installation / Requirements +--------------------------- + +Full documentation for installation is at +`Installation `_. + +Getting Help / Development / Bug reporting +------------------------------------------ + +Please refer to the `SQLAlchemy Community Guide `_. + +License +------- + +SQLAlchemy is distributed under the `MIT license +`_. + diff --git a/lib/sqlalchemy/README.unittests.rst b/lib/sqlalchemy/README.unittests.rst new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/README.unittests.rst @@ -0,0 +1,340 @@ +===================== +SQLALCHEMY UNIT TESTS +===================== + +**NOTE:** SQLAlchemy as of 0.9.4 now standardizes on `pytest `_ +for test running! However, the existing support for Nose **still remains**! +That is, you can now run the tests via pytest or nose. We hope to keep the +suite nose-compatible indefinitely however this might change at some point. + +SQLAlchemy unit tests by default run using Python's built-in sqlite3 +module. If running on a Python installation that doesn't include this +module, then pysqlite or compatible must be installed. + +Unit tests can be run with pytest or nose: + + py.test: http://pytest.org/ + + nose: https://pypi.python.org/pypi/nose/ + +The suite includes enhanced support when running with pytest. + +SQLAlchemy implements plugins for both pytest and nose that must be +present when tests are run. In the case of pytest, this plugin is automatically +used when pytest is run against the SQLAlchemy source tree. However, +for Nose support, a special test runner script must be used. + + +The test suite as also requires the mock library. While +mock is part of the Python standard library as of 3.3, previous versions +will need to have it installed, and is available at:: + + https://pypi.python.org/pypi/mock + +RUNNING TESTS VIA SETUP.PY +-------------------------- +A plain vanilla run of all tests using sqlite can be run via setup.py, and +requires that pytest is installed:: + + $ python setup.py test + + +RUNNING ALL TESTS - PYTEST +-------------------------- +To run all tests:: + + $ py.test + +The pytest configuration in setup.cfg will point the runner at the +test/ directory, where it consumes a conftest.py file that gets everything +else up and running. + + +RUNNING ALL TESTS - NOSE +-------------------------- + +When using Nose, a bootstrap script is provided which sets up sys.path +as well as installs the nose plugin:: + + $ ./sqla_nose.py + +Assuming all tests pass, this is a very unexciting output. To make it more +interesting:: + + $ ./sqla_nose.py -v + +RUNNING INDIVIDUAL TESTS +--------------------------------- + +Any directory of test modules can be run at once by specifying the directory +path, and a specific file can be specified as well:: + + $ py.test test/dialect + + $ py.test test/orm/test_mapper.py + +When using nose, the setup.cfg currently sets "where" to "test/", so the +"test/" prefix is omitted:: + + $ ./sqla_nose.py dialect/ + + $ ./sqla_nose.py orm/test_mapper.py + +With Nose, it is often more intuitive to specify tests as module paths:: + + $ ./sqla_nose.py test.orm.test_mapper + +Nose can also specify a test class and optional method using this syntax:: + + $ ./sqla_nose.py test.orm.test_mapper:MapperTest.test_utils + +With pytest, the -k flag is used to limit tests:: + + $ py.test test/orm/test_mapper.py -k "MapperTest and test_utils" + + +COMMAND LINE OPTIONS +-------------------- + +SQLAlchemy-specific options are added to both runners, which are viewable +within the help screen. With pytest, these options are easier to locate +as they are underneath the "sqlalchemy" grouping:: + + $ py.test --help + + $ ./sqla_nose.py --help + +The --help screen is a combination of common nose options and options which +the SQLAlchemy nose plugin adds. The most commonly SQLAlchemy-specific +options used are '--db' and '--dburi'. + +Both pytest and nose support the same set of SQLAlchemy options, though +pytest features a bit more capability with them. + + +DATABASE TARGETS +---------------- + +Tests will target an in-memory SQLite database by default. To test against +another database, use the --dburi option with any standard SQLAlchemy URL:: + + --dburi=postgresql://user:password at localhost/test + +If you'll be running the tests frequently, database aliases can save a lot of +typing. The --dbs option lists the built-in aliases and their matching URLs:: + + $ py.test --dbs + Available --db options (use --dburi to override) + mysql mysql://scott:tiger at 127.0.0.1:3306/test + oracle oracle://scott:tiger at 127.0.0.1:1521 + postgresql postgresql://scott:tiger at 127.0.0.1:5432/test + [...] + +To run tests against an aliased database:: + + $ py.test --db postgresql + +This list of database urls is present in the setup.cfg file. The list +can be modified/extended by adding a file ``test.cfg`` at the +top level of the SQLAlchemy source distribution which includes +additional entries:: + + [db] + postgresql=postgresql://myuser:mypass at localhost/mydb + +Your custom entries will override the defaults and you'll see them reflected +in the output of --dbs. + +MULTIPLE DATABASE TARGETS +------------------------- + +As of SQLAlchemy 0.9.4, the test runner supports **multiple databases at once**. +This doesn't mean that the entire test suite runs for each database, but +instead specific test suites may do so, while other tests may choose to +run on a specific target out of those available. For example, if the tests underneath +test/dialect/ are run, the majority of these tests are either specific to +a particular backend, or are marked as "multiple", meaning they will run repeatedly +for each database in use. If one runs the test suite as follows:: + + $ py.test test/dialect --db sqlite --db postgresql --db mysql + +The tests underneath test/dialect/test_suite.py will be tripled up, running +as appropriate for each target database, whereas dialect-specific tests +within test/dialect/mysql, test/dialect/postgresql/ test/dialect/test_sqlite.py +should run fully with no skips, as each suite has its target database available. + +The multiple targets feature is available both under pytest and nose, +however when running nose, the "multiple runner" feature won't be available; +instead, the first database target will be used. + +When running with multiple targets, tests that don't prefer a specific target +will be run against the first target specified. Putting sqlite first in +the list will lead to a much faster suite as the in-memory database is +extremely fast for setting up and tearing down tables. + + + +DATABASE CONFIGURATION +---------------------- + +Use an empty database and a database user with general DBA privileges. +The test suite will be creating and dropping many tables and other DDL, and +preexisting tables will interfere with the tests. + +Several tests require alternate usernames or schemas to be present, which +are used to test dotted-name access scenarios. On some databases such +as Oracle or Sybase, these are usernames, and others such as Postgresql +and MySQL they are schemas. The requirement applies to all backends +except SQLite and Firebird. The names are:: + + test_schema + test_schema_2 (only used on Postgresql) + +Please refer to your vendor documentation for the proper syntax to create +these namespaces - the database user must have permission to create and drop +tables within these schemas. Its perfectly fine to run the test suite +without these namespaces present, it only means that a handful of tests which +expect them to be present will fail. + +Additional steps specific to individual databases are as follows:: + + POSTGRESQL: To enable unicode testing with JSONB, create the + database with UTF8 encoding:: + + postgres=# create database test with owner=scott encoding='utf8' template=template0; + + To include tests for HSTORE, create the HSTORE type engine:: + + postgres=# \c test; + You are now connected to database "test" as user "postgresql". + test=# create extension hstore; + CREATE EXTENSION + + MYSQL: Default storage engine should be "MyISAM". Tests that require + "InnoDB" as the engine will specify this explicitly. + + ORACLE: a user named "test_schema" is created. + + The primary database user needs to be able to create and drop tables, + synonyms, and constraints within the "test_schema" user. For this + to work fully, including that the user has the "REFERENCES" role + in a remote schema for tables not yet defined (REFERENCES is per-table), + it is required that the test the user be present in the "DBA" role: + + grant dba to scott; + + SYBASE: Similar to Oracle, "test_schema" is created as a user, and the + primary test user needs to have the "sa_role". + + It's also recommended to turn on "trunc log on chkpt" and to use a + separate transaction log device - Sybase basically seizes up when + the transaction log is full otherwise. + + A full series of setup assuming sa/master: + + disk init name="translog", physname="/opt/sybase/data/translog.dat", size="10M" + create database sqlalchemy on default log on translog="10M" + sp_dboption sqlalchemy, "trunc log on chkpt", true + sp_addlogin scott, "tiger7" + sp_addlogin test_schema, "tiger7" + use sqlalchemy + sp_adduser scott + sp_adduser test_schema + grant all to scott + sp_role "grant", sa_role, scott + + Sybase will still freeze for up to a minute when the log becomes + full. To manually dump the log:: + + dump tran sqlalchemy with truncate_only + + MSSQL: Tests that involve multiple connections require Snapshot Isolation + ability implemented on the test database in order to prevent deadlocks that + will occur with record locking isolation. This feature is only available + with MSSQL 2005 and greater. You must enable snapshot isolation at the + database level and set the default cursor isolation with two SQL commands: + + ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON + + ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON + + MSSQL+zxJDBC: Trying to run the unit tests on Windows against SQL Server + requires using a test.cfg configuration file as the cmd.exe shell won't + properly pass the URL arguments into the nose test runner. + + POSTGRESQL: Full-text search configuration should be set to English, else + several tests of ``.match()`` will fail. This can be set (if it isn't so + already) with: + + ALTER DATABASE test SET default_text_search_config = 'pg_catalog.english' + + +CONFIGURING LOGGING +------------------- +SQLAlchemy logs its activity and debugging through Python's logging package. +Any log target can be directed to the console with command line options, such +as:: + + $ ./sqla_nose.py test.orm.unitofwork --log-info=sqlalchemy.orm.mapper \ + --log-debug=sqlalchemy.pool --log-info=sqlalchemy.engine + +This would log mapper configuration, connection pool checkouts, and SQL +statement execution. + + +BUILT-IN COVERAGE REPORTING +------------------------------ +Coverage is tracked using the coverage plugins built for pytest or nose:: + + $ py.test test/sql/test_query --cov=sqlalchemy + + $ ./sqla_nose.py test.sql.test_query --with-coverage + +BIG COVERAGE TIP !!! There is an issue where existing .pyc files may +store the incorrect filepaths, which will break the coverage system. If +coverage numbers are coming out as low/zero, try deleting all .pyc files. + +DEVELOPING AND TESTING NEW DIALECTS +----------------------------------- + +See the file README.dialects.rst for detail on dialects. + + +TESTING WITH MULTIPLE PYTHON VERSIONS USING TOX +----------------------------------------------- + +If you want to test across multiple versions of Python, you may find `tox +`_ useful. SQLAlchemy includes a tox.ini file:: + + tox -e full + +SQLAlchemy uses tox mostly for pre-fab testing configurations, to simplify +configuration of Jenkins jobs, and *not* for testing different Python +interpreters simultaneously. You can of course create whatever alternate +tox.ini file you want. + +Environments include:: + + "full" - runs a full py.test + + "coverage" - runs a py.test plus coverage, skipping memory/timing + intensive tests + + "pep8" - runs flake8 against the codebase (useful with --diff to check + against a patch) + + +PARALLEL TESTING +---------------- + +Parallel testing is supported using the Pytest xdist plugin. Supported +databases currently include sqlite, postgresql, and mysql. The username +for the database should have CREATE DATABASE and DROP DATABASE privileges. +After installing pytest-xdist, testing is run adding the -n option. +For example, to run against sqlite, mysql, postgresql with four processes:: + + tox -e -- -n 4 --db sqlite --db postgresql --db mysql + +Each backend has a different scheme for setting up the database. Postgresql +still needs the "test_schema" and "test_schema_2" schemas present, as the +parallel databases are created using the base database as a "template". diff --git a/lib/sqlalchemy/doc/_images/sqla_arch_small.png b/lib/sqlalchemy/doc/_images/sqla_arch_small.png new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..a1c09585ec8d45ee60ad356b315d5a4089421068 GIT binary patch [cut] diff --git a/lib/sqlalchemy/doc/_images/sqla_engine_arch.png b/lib/sqlalchemy/doc/_images/sqla_engine_arch.png new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f040a2cf317c0118bc9d5b12c458224dea4eb690 GIT binary patch [cut] diff --git a/lib/sqlalchemy/doc/_modules/examples/adjacency_list/adjacency_list.html b/lib/sqlalchemy/doc/_modules/examples/adjacency_list/adjacency_list.html new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/doc/_modules/examples/adjacency_list/adjacency_list.html @@ -0,0 +1,253 @@ + + + + + + + + + + examples.adjacency_list.adjacency_list + — + SQLAlchemy 1.0 Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + + + + + +
      +
      +
      + Release: 1.0.11 | Release Date: December 12, 2015 +
      + +

      SQLAlchemy 1.0 Documentation

      + +
      +
      + +
      + +
      + +
      + + +

      + Contents | + Index +

      + +
      + + +
      + + + +
      + +

      Source code for examples.adjacency_list.adjacency_list

      +from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
      +from sqlalchemy.orm import Session, relationship, backref,\
      +                                joinedload_all
      +from sqlalchemy.ext.declarative import declarative_base
      +from sqlalchemy.orm.collections import attribute_mapped_collection
      +
      +
      +Base = declarative_base()
      +
      +class TreeNode(Base):
      +    __tablename__ = 'tree'
      +    id = Column(Integer, primary_key=True)
      +    parent_id = Column(Integer, ForeignKey(id))
      +    name = Column(String(50), nullable=False)
      +
      +    children = relationship("TreeNode",
      +
      +                        # cascade deletions
      +                        cascade="all, delete-orphan",
      +
      +                        # many to one + adjacency list - remote_side
      +                        # is required to reference the 'remote'
      +                        # column in the join condition.
      +                        backref=backref("parent", remote_side=id),
      +
      +                        # children will be represented as a dictionary
      +                        # on the "name" attribute.
      +                        collection_class=attribute_mapped_collection('name'),
      +                    )
      +
      +    def __init__(self, name, parent=None):
      +        self.name = name
      +        self.parent = parent
      +
      +    def __repr__(self):
      +        return "TreeNode(name=%r, id=%r, parent_id=%r)" % (
      +                    self.name,
      +                    self.id,
      +                    self.parent_id
      +                )
      +
      +    def dump(self, _indent=0):
      +        return "   " * _indent + repr(self) + \
      +                    "\n" + \
      +                    "".join([
      +                        c.dump(_indent + 1)
      +                        for c in self.children.values()]
      +                    )
      +
      +if __name__ == '__main__':
      +    engine = create_engine('sqlite://', echo=True)
      +
      +    def msg(msg, *args):
      +        msg = msg % args
      +        print("\n\n\n" + "-" * len(msg.split("\n")[0]))
      +        print(msg)
      +        print("-" * len(msg.split("\n")[0]))
      +
      +    msg("Creating Tree Table:")
      +
      +    Base.metadata.create_all(engine)
      +
      +    session = Session(engine)
      +
      +    node = TreeNode('rootnode')
      +    TreeNode('node1', parent=node)
      +    TreeNode('node3', parent=node)
      +
      +    node2 = TreeNode('node2')
      +    TreeNode('subnode1', parent=node2)
      +    node.children['node2'] = node2
      +    TreeNode('subnode2', parent=node.children['node2'])
      +
      +    msg("Created new tree structure:\n%s", node.dump())
      +
      +    msg("flush + commit:")
      +
      +    session.add(node)
      +    session.commit()
      +
      +    msg("Tree After Save:\n %s", node.dump())
      +
      +    TreeNode('node4', parent=node)
      +    TreeNode('subnode3', parent=node.children['node4'])
      +    TreeNode('subnode4', parent=node.children['node4'])
      +    TreeNode('subsubnode1', parent=node.children['node4'].children['subnode3'])
      +
      +    # remove node1 from the parent, which will trigger a delete
      +    # via the delete-orphan cascade.
      +    del node.children['node1']
      +
      +    msg("Removed node1.  flush + commit:")
      +    session.commit()
      +
      +    msg("Tree after save:\n %s", node.dump())
      +
      +    msg("Emptying out the session entirely, "
      +        "selecting tree on root, using eager loading to join four levels deep.")
      +    session.expunge_all()
      +    node = session.query(TreeNode).\
      +                        options(joinedload_all("children", "children",
      +                                                "children", "children")).\
      +                        filter(TreeNode.name == "rootnode").\
      +                        first()
      +
      +    msg("Full Tree:\n%s", node.dump())
      +
      +    msg("Marking root node as deleted, flush + commit:")
      +
      +    session.delete(node)
      +    session.commit()
      +
      + +
      + + + +
      + + + + + + + + + + + + + + + + + + + + + diff --git a/lib/sqlalchemy/doc/_modules/examples/association/basic_association.html b/lib/sqlalchemy/doc/_modules/examples/association/basic_association.html new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/doc/_modules/examples/association/basic_association.html @@ -0,0 +1,240 @@ + + + + + + + + + + examples.association.basic_association + — + SQLAlchemy 1.0 Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + + + + + +
      +
      +
      + Release: 1.0.11 | Release Date: December 12, 2015 +
      + +

      SQLAlchemy 1.0 Documentation

      + +
      +
      + +
      + +
      + +
      + + +

      + Contents | + Index +

      + +
      + + +
      + + + +
      + +

      Source code for examples.association.basic_association

      +"""basic_association.py
      +
      +illustrate a many-to-many relationship between an
      +"Order" and a collection of "Item" objects, associating a purchase price
      +with each via an association object called "OrderItem"
      +
      +The association object pattern is a form of many-to-many which
      +associates additional data with each association between parent/child.
      +
      +The example illustrates an "order", referencing a collection
      +of "items", with a particular price paid associated with each "item".
      +
      +"""
      +
      +from datetime import datetime
      +
      +from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
      +    String, DateTime, Float, ForeignKey, and_)
      +from sqlalchemy.orm import mapper, relationship, Session
      +from sqlalchemy.ext.declarative import declarative_base
      +
      +Base = declarative_base()
      +
      +class Order(Base):
      +    __tablename__ = 'order'
      +
      +    order_id = Column(Integer, primary_key=True)
      +    customer_name = Column(String(30), nullable=False)
      +    order_date = Column(DateTime, nullable=False, default=datetime.now())
      +    order_items = relationship("OrderItem", cascade="all, delete-orphan",
      +                            backref='order')
      +
      +    def __init__(self, customer_name):
      +        self.customer_name = customer_name
      +
      +class Item(Base):
      +    __tablename__ = 'item'
      +    item_id = Column(Integer, primary_key=True)
      +    description = Column(String(30), nullable=False)
      +    price = Column(Float, nullable=False)
      +
      +    def __init__(self, description, price):
      +        self.description = description
      +        self.price = price
      +
      +    def __repr__(self):
      +        return 'Item(%r, %r)' % (
      +                    self.description, self.price
      +                )
      +
      +class OrderItem(Base):
      +    __tablename__ = 'orderitem'
      +    order_id = Column(Integer, ForeignKey('order.order_id'), primary_key=True)
      +    item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True)
      +    price = Column(Float, nullable=False)
      +
      +    def __init__(self, item, price=None):
      +        self.item = item
      +        self.price = price or item.price
      +    item = relationship(Item, lazy='joined')
      +
      +if __name__ == '__main__':
      +    engine = create_engine('sqlite://')
      +    Base.metadata.create_all(engine)
      +
      +    session = Session(engine)
      +
      +    # create catalog
      +    tshirt, mug, hat, crowbar = (
      +        Item('SA T-Shirt', 10.99),
      +        Item('SA Mug', 6.50),
      +        Item('SA Hat', 8.99),
      +        Item('MySQL Crowbar', 16.99)
      +    )
      +    session.add_all([tshirt, mug, hat, crowbar])
      +    session.commit()
      +
      +    # create an order
      +    order = Order('john smith')
      +
      +    # add three OrderItem associations to the Order and save
      +    order.order_items.append(OrderItem(mug))
      +    order.order_items.append(OrderItem(crowbar, 10.99))
      +    order.order_items.append(OrderItem(hat))
      +    session.add(order)
      +    session.commit()
      +
      +    # query the order, print items
      +    order = session.query(Order).filter_by(customer_name='john smith').one()
      +    print([(order_item.item.description, order_item.price)
      +           for order_item in order.order_items])
      +
      +    # print customers who bought 'MySQL Crowbar' on sale
      +    q = session.query(Order).join('order_items', 'item')
      +    q = q.filter(and_(Item.description == 'MySQL Crowbar',
      +                      Item.price > OrderItem.price))
      +
      +    print([order.customer_name for order in q])
      +
      + +
      + + + +
      + + + + + + + + + + + + + + + + + + + + + diff --git a/lib/sqlalchemy/doc/_modules/examples/association/dict_of_sets_with_default.html b/lib/sqlalchemy/doc/_modules/examples/association/dict_of_sets_with_default.html new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/doc/_modules/examples/association/dict_of_sets_with_default.html @@ -0,0 +1,232 @@ + + + + + + + + + + examples.association.dict_of_sets_with_default + — + SQLAlchemy 1.0 Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + + + + + +
      +
      +
      + Release: 1.0.11 | Release Date: December 12, 2015 +
      + +

      SQLAlchemy 1.0 Documentation

      + +
      +
      + +
      + +
      + +
      + + +

      + Contents | + Index +

      + +
      + + +
      + + + +
      + +

      Source code for examples.association.dict_of_sets_with_default

      +"""dict_of_sets_with_default.py
      +
      +an advanced association proxy example which
      +illustrates nesting of association proxies to produce multi-level Python
      +collections, in this case a dictionary with string keys and sets of integers
      +as values, which conceal the underlying mapped classes.
      +
      +This is a three table model which represents a parent table referencing a
      +dictionary of string keys and sets as values, where each set stores a
      +collection of integers. The association proxy extension is used to hide the
      +details of this persistence. The dictionary also generates new collections
      +upon access of a non-existent key, in the same manner as Python's
      +"collections.defaultdict" object.
      +
      +"""
      +
      +from sqlalchemy import String, Integer, Column, create_engine, ForeignKey
      +from sqlalchemy.orm import relationship, Session
      +from sqlalchemy.orm.collections import MappedCollection
      +from sqlalchemy.ext.declarative import declarative_base
      +from sqlalchemy.ext.associationproxy import association_proxy
      +import operator
      +
      +class Base(object):
      +    id = Column(Integer, primary_key=True)
      +
      +Base = declarative_base(cls=Base)
      +
      +class GenDefaultCollection(MappedCollection):
      +    def __missing__(self, key):
      +        self[key] = b = B(key)
      +        return b
      +
      +class A(Base):
      +    __tablename__ = "a"
      +    associations = relationship("B",
      +        collection_class=lambda: GenDefaultCollection(operator.attrgetter("key"))
      +    )
      +
      +    collections = association_proxy("associations", "values")
      +    """Bridge the association from 'associations' over to the 'values'
      +    association proxy of B.
      +    """
      +
      +class B(Base):
      +    __tablename__ = "b"
      +    a_id = Column(Integer, ForeignKey("a.id"), nullable=False)
      +    elements = relationship("C", collection_class=set)
      +    key = Column(String)
      +
      +    values = association_proxy("elements", "value")
      +    """Bridge the association from 'elements' over to the
      +    'value' element of C."""
      +
      +    def __init__(self, key, values=None):
      +        self.key = key
      +        if values:
      +            self.values = values
      +
      +class C(Base):
      +    __tablename__ = "c"
      +    b_id = Column(Integer, ForeignKey("b.id"), nullable=False)
      +    value = Column(Integer)
      +    def __init__(self, value):
      +        self.value = value
      +
      +if __name__ == '__main__':
      +    engine = create_engine('sqlite://', echo=True)
      +    Base.metadata.create_all(engine)
      +    session = Session(engine)
      +
      +    # only "A" is referenced explicitly.  Using "collections",
      +    # we deal with a dict of key/sets of integers directly.
      +
      +    session.add_all([
      +        A(collections={
      +            "1": set([1, 2, 3]),
      +        })
      +    ])
      +    session.commit()
      +
      +    a1 = session.query(A).first()
      +    print(a1.collections["1"])
      +    a1.collections["1"].add(4)
      +    session.commit()
      +
      +    a1.collections["2"].update([7, 8, 9])
      +    session.commit()
      +
      +    print(a1.collections["2"])
      +
      + +
      + + + +
      + + + + + + + + + + + + + + + + + + + + + diff --git a/lib/sqlalchemy/doc/_modules/examples/association/proxied_association.html b/lib/sqlalchemy/doc/_modules/examples/association/proxied_association.html new file mode 100644 --- /dev/null +++ b/lib/sqlalchemy/doc/_modules/examples/association/proxied_association.html @@ -0,0 +1,247 @@ + + + + + + + + + + examples.association.proxied_association + — + SQLAlchemy 1.0 Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + + + + + +
      +
      +
      + Release: 1.0.11 | Release Date: December 12, 2015 +
      + +

      SQLAlchemy 1.0 Documentation

      + +
      +
      + +
      + +
      + +
      + + +

      + Contents | + Index +

      + +
      + + +
      + + + +
      + +

      Source code for examples.association.proxied_association

      +"""proxied_association.py
      +
      +same example as basic_association, adding in
      +usage of :mod:`sqlalchemy.ext.associationproxy` to make explicit references
      +to ``OrderItem`` optional.
      +
      +
      +"""
      +
      +from datetime import datetime
      +
      +from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
      +    String, DateTime, Float, ForeignKey, and_)
      +from sqlalchemy.orm import mapper, relationship, Session
      +from sqlalchemy.ext.declarative import declarative_base
      +from sqlalchemy.ext.associationproxy import association_proxy
      +
      +Base = declarative_base()
      +
      +class Order(Base):
      +    __tablename__ = 'order'
      +
      +    order_id = Column(Integer, primary_key=True)
      +    customer_name = Column(String(30), nullable=False)
      +    order_date = Column(DateTime, nullable=False, default=datetime.now())
      +    order_items = relationship("OrderItem", cascade="all, delete-orphan",
      +                            backref='order')
      +    items = association_proxy("order_items", "item")
      +
      +    def __init__(self, customer_name):
      +        self.customer_name = customer_name
      +
      +class Item(Base):
      +    __tablename__ = 'item'
      +    item_id = Column(Integer, primary_key=True)
      +    description = Column(String(30), nullable=False)
      +    price = Column(Float, nullable=False)
      +
      +    def __init__(self, description, price):
      +        self.description = description
      +        self.price = price
      +
      +    def __repr__(self):
      +        return 'Item(%r, %r)' % (
      +                    self.description, self.price
      +                )
      +
      +class OrderItem(Base):
      +    __tablename__ = 'orderitem'
      +    order_id = Column(Integer, ForeignKey('order.order_id'), primary_key=True)
      +    item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True)
      +    price = Column(Float, nullable=False)
      +
      +    def __init__(self, item, price=None):
      +        self.item = item
      +        self.price = price or item.price
      +    item = relationship(Item, lazy='joined')
      +
      +if __name__ == '__main__':
      +    engine = create_engine('sqlite://')
      +    Base.metadata.create_all(engine)
      +
      +    session = Session(engine)
      +
      +    # create catalog
      +    tshirt, mug, hat, crowbar = (
      +        Item('SA T-Shirt', 10.99),
      +        Item('SA Mug', 6.50),
      +        Item('SA Hat', 8.99),
      +        Item('MySQL Crowbar', 16.99)
      +    )
      +    session.add_all([tshirt, mug, hat, crowbar])
      +    session.commit()
      +
      +    # create an order
      +    order = Order('john smith')
      +
      +    # add items via the association proxy.
      +    # the OrderItem is created automatically.
      +    order.items.append(mug)
      +    order.items.append(hat)
      +
      +    # add an OrderItem explicitly.
      +    order.order_items.append(OrderItem(crowbar, 10.99))
      +
      +    session.add(order)
      +    session.commit()
      +
      +    # query the order, print items
      +    order = session.query(Order).filter_by(customer_name='john smith').one()
      +
      +    # print items based on the OrderItem collection directly
      +    print([(assoc.item.description, assoc.price, assoc.item.price)
      +           for assoc in order.order_items])
      +
      +    # print items based on the "proxied" items collection
      +    print([(item.description, item.price)
      +           for item in order.items])
      +
      +    # print customers who bought 'MySQL Crowbar' on sale
      +    orders = session.query(Order).\
      +                    join('order_items', 'item').\
      +                    filter(Item.description == 'MySQL Crowbar').\
      +                    filter(Item.price > OrderItem.price)
      +    print([o.customer_name for o in orders])
      +
      + +
      + + + +
      From pypy.commits at gmail.com Thu Jan 21 02:24:00 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 20 Jan 2016 23:24:00 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: fix the interaction between record_exact_class and the heapcache Message-ID: <56a08790.8205c20a.bf1e4.0054@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81881:7a18372094c5 Date: 2016-01-21 08:23 +0100 http://bitbucket.org/pypy/pypy/changeset/7a18372094c5/ Log: fix the interaction between record_exact_class and the heapcache diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -206,6 +206,7 @@ opnum == rop.SETFIELD_RAW or opnum == rop.SETARRAYITEM_RAW or opnum == rop.SETINTERIORFIELD_RAW or + opnum == rop.RECORD_EXACT_CLASS or opnum == rop.RAW_STORE): return if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -707,3 +707,29 @@ res = self.interp_operations(fn, [0]) assert res == 0 self.check_operations_history(setfield_gc=0) + + def test_record_known_class_does_not_invalidate(self): + class A: + pass + class B(A): + pass + class C(object): + _immutable_fields_ = ['x?'] + c = C() + c.x = 5 + c.b = A() + c.b.x = 14 + def fn(n): + if n == 99: + c.x = 12 + c.b = B() + c.b.x = 12 + return 15 + b = c.b + x = b.x + jit.record_exact_class(c.b, A) + y = b.x + return x + y + res = self.interp_operations(fn, [1]) + assert res == 2 * 14 + self.check_operations_history(getfield_gc_i=1) From pypy.commits at gmail.com Thu Jan 21 03:04:24 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 00:04:24 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: fix test_pypy_c tests Message-ID: <56a09108.6adec20a.b3bf0.1040@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81882:7dfb24f5e8c5 Date: 2016-01-21 09:03 +0100 http://bitbucket.org/pypy/pypy/changeset/7dfb24f5e8c5/ Log: fix test_pypy_c tests diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -526,7 +526,7 @@ log = self.run(f) loop, = log.loops_by_filename(self.filepath) call_ops = log.opnames(loop.ops_by_id('call')) - assert call_ops == ['guard_not_invalidated', 'force_token'] # it does not follow inlining + assert call_ops == ['force_token'] # it does not follow inlining # add_ops = log.opnames(loop.ops_by_id('add')) assert add_ops == ['int_add'] @@ -535,7 +535,6 @@ assert ops == [ # this is the actual loop 'guard_not_invalidated', 'int_lt', 'guard_true', 'force_token', 'int_add', - 'force_token', 'int_add', # this is the signal checking stuff 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -71,7 +71,7 @@ entry_bridge, = log.loops_by_id('call', is_entry_bridge=True) # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') - assert log.opnames(ops) == ["guard_value"] + assert log.opnames(ops) == [] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] # @@ -218,7 +218,7 @@ loop, = log.loops_by_id('call') ops = log.opnames(loop.ops_by_id('call')) guards = [ops for ops in ops if ops.startswith('guard')] - assert guards == ["guard_not_invalidated", "guard_no_overflow"] + assert guards == ["guard_no_overflow"] def test_kwargs(self): # this is not a very precise test, could be improved diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -422,7 +422,6 @@ i114 = int_ne(i160, i112) guard_false(i114, descr=...) --TICK-- - i123 = arraylen_gc(p67, descr=) i119 = call_i(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) raw_store(i119, 0, i160, descr=) raw_store(i119, 2, i160, descr=) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -299,10 +299,12 @@ i129 = int_add(i55, i128) f149 = raw_load_f(i100, i129, descr=) i151 = int_add(i117, 1) + i92 = getfield_raw_i(54402752, descr=) setfield_gc(p156, i55, descr=) setarrayitem_gc(p150, 1, 0, descr=) setarrayitem_gc(p150, 0, 0, descr=) - --TICK-- + i95 = int_lt(i92, 0) + guard_false(i95, descr=...) jump(..., descr=...) """) @@ -354,12 +356,13 @@ guard_false(i92, descr=...) i93 = int_add(i91, 1) setfield_gc(p23, i93, descr=) + guard_not_invalidated? i94 = int_ge(i91, i56) guard_false(i94, descr=...) i96 = int_mul(i91, i58) i97 = int_add(i51, i96) f98 = raw_load_f(i63, i97, descr=) - guard_not_invalidated(descr=...) + guard_not_invalidated? f100 = float_mul(f98, 0.500000) i101 = int_add(i79, 1) i102 = arraylen_gc(p85, descr=) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -114,16 +114,14 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i12 = int_is_true(i4) guard_true(i12, descr=...) - guard_not_invalidated(descr=...) - i13 = int_add_ovf(i8, i9) + guard_not_invalidated? + i13 = int_add_ovf(i8, 2) guard_no_overflow(descr=...) - i10 = int_mul_ovf(2, i61) + i14 = int_add_ovf(i13, 2) guard_no_overflow(descr=...) - i14 = int_add_ovf(i13, i10) - guard_no_overflow(descr=...) - setfield_gc(p7, i11, descr=...) i17 = int_sub_ovf(i4, 1) guard_no_overflow(descr=...) --TICK-- @@ -151,9 +149,10 @@ setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) i18 = force_token() + i75 = int_sub(i71, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) - i22 = int_lt(i10, i14) + i22 = int_lt(i10, _) guard_true(i22, descr=...) i23 = int_add_ovf(i6, i10) guard_no_overflow(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -33,6 +33,7 @@ import thread local = thread._local() local.x = 1 + local.x = 2 # make it not constant i = 0 while i < n: i += local.x @@ -41,9 +42,10 @@ assert round(log.result, 6) == round(main(500), 6) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i53 = int_lt(i48, i27) guard_true(i53, descr=...) - guard_not_invalidated(descr=...) + guard_not_invalidated? i54 = int_add_ovf(i48, i47) guard_no_overflow(descr=...) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -25,8 +25,6 @@ i61 = int_add(i58, 1) setfield_gc(p18, i61, descr=) guard_not_invalidated(descr=...) - p62 = getfield_gc_r(ConstPtr(ptr37), descr=) - guard_value(p62, ConstPtr(ptr39), descr=...) p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p67 = force_token() From pypy.commits at gmail.com Thu Jan 21 03:51:44 2016 From: pypy.commits at gmail.com (Vincent Legoll) Date: Thu, 21 Jan 2016 00:51:44 -0800 (PST) Subject: [pypy-commit] pypy default: Fix test_rabspath_absolute_nt Message-ID: <56a09c20.85e41c0a.93cdb.ffffe7eb@mx.google.com> Author: Vincent Legoll Branch: Changeset: r81883:49cd97514141 Date: 2016-01-21 08:52 +0100 http://bitbucket.org/pypy/pypy/changeset/49cd97514141/ Log: Fix test_rabspath_absolute_nt rpath._nt_rsplitdrive() returns a tuple, use the right part of it diff --git a/rpython/rlib/test/test_rpath.py b/rpython/rlib/test/test_rpath.py --- a/rpython/rlib/test/test_rpath.py +++ b/rpython/rlib/test/test_rpath.py @@ -68,8 +68,8 @@ assert rpath._nt_rabspath('d:\\foo\\bar\\..') == 'd:\\foo' assert rpath._nt_rabspath('d:\\foo\\bar\\..\\x') == 'd:\\foo\\x' curdrive = _ = rpath._nt_rsplitdrive(os.getcwd()) - assert len(curdrive) == 2 and curdrive[1] == ':' - assert rpath.rabspath('\\foo') == '%s\\foo' % curdrive + assert len(curdrive) == 2 and curdrive[0][1] == ':' + assert rpath.rabspath('\\foo') == '%s\\foo' % curdrive[0] def test_risabs_posix(): assert rpath._posix_risabs('/foo/bar') From pypy.commits at gmail.com Thu Jan 21 04:10:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 21 Jan 2016 01:10:00 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: made register r13 dirty before flushing the non volatile registers to the stack, Message-ID: <56a0a068.2457c20a.a069d.2ac2@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81884:f5ab3026a4db Date: 2016-01-21 10:08 +0100 http://bitbucket.org/pypy/pypy/changeset/f5ab3026a4db/ Log: made register r13 dirty before flushing the non volatile registers to the stack, it seems that this cannot happen in the test suite because it is either ffi/ctypes that restores a constant r13 after the call and the translated tests might not need r13 after finishing the jit code diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -633,7 +633,6 @@ looptoken, clt.allgcrefs) self.pool.pre_assemble(self, operations) entrypos = self.mc.get_relative_pos() - self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - entrypos)) self._call_header_with_stack_check() looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, @@ -1000,6 +999,7 @@ def _call_header(self): # Build a new stackframe of size STD_FRAME_SIZE_IN_BYTES self.mc.STMG(r.r6, r.r15, l.addr(6*WORD, r.SP)) + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) # save the back chain self.mc.STG(r.SP, l.addr(0, r.SP)) From pypy.commits at gmail.com Thu Jan 21 05:25:26 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 21 Jan 2016 02:25:26 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: it can happen that longevity does not contain an entry for an operation (e.g. int_mul_ovf and result is not used), then when trying to spill a variable op can be in reg_bindings, but is not in longevity -> KeyError, Message-ID: <56a0b216.022f1c0a.31567.ffffb0f2@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81885:7ac200cdeecd Date: 2016-01-21 11:24 +0100 http://bitbucket.org/pypy/pypy/changeset/7ac200cdeecd/ Log: it can happen that longevity does not contain an entry for an operation (e.g. int_mul_ovf and result is not used), then when trying to spill a variable op can be in reg_bindings, but is not in longevity -> KeyError, fixed this by ensuring that the pair allocation happens at the latest point in the regalloc step diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -51,11 +51,11 @@ a1 = op.getarg(1) if a0.is_constant(): a0, a1 = a1, a0 - lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=False) if check_imm32(a1): l1 = imm(a1.getint()) else: l1 = self.ensure_reg(a1) + lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=False) self.free_op_vars() return [lr, lq, l1] @@ -63,15 +63,14 @@ def f(self, op): a0 = op.getarg(0) a1 = op.getarg(1) + l1 = self.ensure_reg(a1) if isinstance(a0, Const): poolloc = self.ensure_reg(a0) lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=modulus, must_exist=False) self.assembler.mc.LG(lq, poolloc) else: lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=modulus) - l1 = self.ensure_reg(a1) self.free_op_vars() - self.rm._check_invariants() return [lr, lq, l1] return f diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -952,19 +952,19 @@ def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) + startindex_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + tempvar = TempInt() + self.rm.temp_boxes.append(tempvar) + ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + pad_byte, _ = self.rm.ensure_even_odd_pair(tempvar, tempvar, + bind_first=True, must_exist=False, move_regs=False) base_loc, length_loc = self.rm.ensure_even_odd_pair(op.getarg(0), op, bind_first=True, must_exist=False, load_loc_odd=False) - tempvar = TempInt() - self.rm.temp_boxes.append(tempvar) - pad_byte, _ = self.rm.ensure_even_odd_pair(tempvar, tempvar, - bind_first=True, must_exist=False, move_regs=False) - startindex_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) length_box = op.getarg(2) ll = self.rm.loc(length_box) if length_loc is not ll: self.assembler.regalloc_mov(ll, length_loc) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) return [base_loc, startindex_loc, length_loc, ofs_loc, imm(itemsize), pad_byte] def prepare_cond_call(self, op): From pypy.commits at gmail.com Thu Jan 21 14:26:17 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 21 Jan 2016 11:26:17 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Cleanup based on suggestions from fijal Message-ID: <56a130d9.c6e01c0a.6e68b.6111@mx.google.com> Author: Spenser Bauman Branch: remove-getfield-pure Changeset: r81886:3a92e4541f68 Date: 2016-01-21 14:13 -0500 http://bitbucket.org/pypy/pypy/changeset/3a92e4541f68/ Log: Cleanup based on suggestions from fijal diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -5,7 +5,7 @@ ConstIntBound, MININT, MAXINT, IntUnbounded from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop, AbstractResOp, GuardResOp,\ - OpHelpers, ResOperation, is_pure_getfield + OpHelpers, ResOperation from rpython.jit.metainterp.optimizeopt import info from rpython.jit.metainterp.typesystem import llhelper from rpython.rlib.objectmodel import specialize, we_are_translated @@ -756,7 +756,7 @@ opnum = op.getopnum() cpu = self.cpu - if is_pure_getfield(opnum, op.getdescr()): + if OpHelpers.is_pure_getfield(opnum, op.getdescr()): fielddescr = op.getdescr() ref = self.get_constant_box(op.getarg(0)).getref_base() cpu.protect_speculative_field(ref, fielddescr) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2095,21 +2095,7 @@ profiler = self.staticdata.profiler profiler.count_ops(opnum) resvalue = executor.execute(self.cpu, self, opnum, descr, *argboxes) - # - is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST - if not is_pure: - # TODO Don't base purity of an operation solely on opnum - if (opnum == rop.GETFIELD_RAW_I or - opnum == rop.GETFIELD_RAW_R or - opnum == rop.GETFIELD_RAW_F or - opnum == rop.GETFIELD_GC_I or - opnum == rop.GETFIELD_GC_R or - opnum == rop.GETFIELD_GC_F or - opnum == rop.GETARRAYITEM_RAW_I or - opnum == rop.GETARRAYITEM_RAW_F): - is_pure = descr.is_always_pure() - # - if is_pure: + if OpHelpers.is_pure_with_descr(opnum, descr): return self._record_helper_pure(opnum, resvalue, descr, *argboxes) if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: return self._record_helper_ovf(opnum, resvalue, descr, *argboxes) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -246,11 +246,6 @@ def forget_value(self): pass -def is_pure_getfield(opnum, descr): - if opnum not in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R): - return False - return descr is not None and descr.is_always_pure() - class AbstractResOp(AbstractResOpOrInputArg): """The central ResOperation class, representing one operation.""" @@ -1757,4 +1752,26 @@ opnum = rop.VEC_UNPACK_F return VecOperationNew(opnum, args, datatype, bytesize, signed, count) + @staticmethod + def is_pure_getfield(opnum, descr): + if (opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETFIELD_GC_R): + return descr is not None and descr.is_always_pure() + return False + @staticmethod + def is_pure_with_descr(opnum, descr): + is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST + if not is_pure: + if (opnum == rop.GETFIELD_RAW_I or + opnum == rop.GETFIELD_RAW_R or + opnum == rop.GETFIELD_RAW_F or + opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_R or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETARRAYITEM_RAW_I or + opnum == rop.GETARRAYITEM_RAW_F): + is_pure = descr.is_always_pure() + return is_pure + From pypy.commits at gmail.com Thu Jan 21 14:38:38 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 21 Jan 2016 11:38:38 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: advancing the pointer to correctly return the value written from libffi, the reason why this does not happen on e.g. ppc bigendian 64 bit is: libffi ppc casts the return value to the requested type (s390x does not and we have discussed this enough already) Message-ID: <56a133be.c615c20a.64185.0c66@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81887:08606f22af4a Date: 2016-01-21 20:37 +0100 http://bitbucket.org/pypy/pypy/changeset/08606f22af4a/ Log: advancing the pointer to correctly return the value written from libffi, the reason why this does not happen on e.g. ppc bigendian 64 bit is: libffi ppc casts the return value to the requested type (s390x does not and we have discussed this enough already) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -1,8 +1,10 @@ +import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty +from rpython.jit.backend.llsupport.symbolic import WORD from rpython.rlib.clibffi import * from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform @@ -19,6 +21,8 @@ from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +BIGENDIAN = sys.byteorder == 'big' + TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ 'c' : ffi_type_uchar, @@ -331,10 +335,14 @@ if tracker.DO_TRACING: ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) tracker.trace_allocation(ll_buf, self) + self._ll_buffer = self.ll_buffer def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer)) + def buffer_advance(self, n): + self.ll_buffer = rffi.ptradd(self.ll_buffer, n) + def byptr(self, space): from pypy.module._rawffi.array import ARRAY_OF_PTRS array = ARRAY_OF_PTRS.allocate(space, 1) @@ -342,16 +350,17 @@ return space.wrap(array) def free(self, space): - if not self.ll_buffer: + if not self._ll_buffer: raise segfault_exception(space, "freeing NULL pointer") self._free() def _free(self): if tracker.DO_TRACING: - ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) + ll_buf = rffi.cast(lltype.Signed, self._ll_buffer) tracker.trace_free(ll_buf) - lltype.free(self.ll_buffer, flavor='raw') + lltype.free(self._ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) + self._ll_buffer = self.ll_buffer def buffer_w(self, space, flags): return RawFFIBuffer(self) @@ -497,6 +506,11 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) + if BIGENDIAN and result.shape.size < WORD: + # we get a 8 byte value in big endian + n = WORD - result.shape.size + result.buffer_advance(n) + return space.wrap(result) else: self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) From pypy.commits at gmail.com Thu Jan 21 14:54:06 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 11:54:06 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: another one, for loop constants Message-ID: <56a1375e.2851c20a.c3793.0fe8@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81889:28a4d2d1f711 Date: 2016-01-21 13:51 +0100 http://bitbucket.org/pypy/pypy/changeset/28a4d2d1f711/ Log: another one, for loop constants diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -625,3 +625,19 @@ i += 1 return 13 """, [1000]) + + def test_no_type_check_for_loopconst(self): + log = self.run(""" + + def main(stop): + a = [1, 2] # always a W_ListObject + i = 0 + while i < stop: + i += isinstance(a, list) # ID: isinstance + return 13 + """, [1000]) + entry_bridge, = log.loops_by_id('isinstance', is_entry_bridge=True) + entry_bridge.match_by_id("isinstance", """ + i1 = int_add(i0, 1) + --TICK-- + """) From pypy.commits at gmail.com Thu Jan 21 14:54:05 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 11:54:05 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: add some tests about the new instance features Message-ID: <56a1375d.6adec20a.b3bf0.1330@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81888:d9d8458cbfb3 Date: 2016-01-21 09:23 +0100 http://bitbucket.org/pypy/pypy/changeset/d9d8458cbfb3/ Log: add some tests about the new instance features diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -280,3 +280,68 @@ --TICK-- jump(..., descr=...) """) + + def test_mutate_instance_int(self): + def main(): + class A(object): + pass + a = A() + + a.x = 0 + a.upper = 1000 + while a.x < a.upper: + a.x += 1 + return a.x + log = self.run(main, []) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + guard_not_invalidated? + i33 = int_lt(i28, 1000) + guard_true(i33, descr=...) + i34 = int_add(i28, 1) + i35 = getfield_raw_i(..., descr=...) + setfield_gc(p23, i34, descr=...) + i36 = int_lt(i35, 0) + guard_false(i36, descr=...) + jump(..., descr=...) + """) + + def test_instances_class_is_known(self): + def main(): + class A(object): + pass + class B(object): + pass + a1 = A() + a2 = A() + + a1.a = B() + a2.a = B() + i = 0 + while i < 1000: + a1.a.x = i + 0.2 + a1, a2 = a2, a1 + i += 1 + return a1.a.x + a2.a.x + log = self.run(main, []) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + guard_not_invalidated? + i42 = int_lt(i37, 1000) + guard_true(i42, descr=...) + f50 = cast_int_to_float(i37) + f51 = float_add(f50, 0.200000) + p43 = getfield_gc_r(p16, descr=) + guard_value(p43, ConstPtr(ptr31), descr=...) + p44 = getfield_gc_r(p16, descr=) + p45 = getfield_gc_r(p44, descr=) + guard_value(p45, ConstPtr(ptr34), descr=...) + p46 = getfield_gc_r(p44, descr=) + i47 = int_add(i37, 1) + i48 = getfield_raw_i(54402752, descr=...) + setfield_gc(p46, f51, descr=) + i49 = int_lt(i48, 0) + guard_false(i49, descr=...) + jump(..., descr=...) + """) + From pypy.commits at gmail.com Thu Jan 21 14:54:12 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 11:54:12 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: document the profiler Message-ID: <56a13764.53ad1c0a.96b67.ffffe528@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81892:6fb5c569a60c Date: 2016-01-21 18:34 +0100 http://bitbucket.org/pypy/pypy/changeset/6fb5c569a60c/ Log: document the profiler diff --git a/pypy/interpreter/valueprof.py b/pypy/interpreter/valueprof.py --- a/pypy/interpreter/valueprof.py +++ b/pypy/interpreter/valueprof.py @@ -8,13 +8,39 @@ SEEN_TOO_MUCH = '?' class ValueProf(object): + """ Some reusal heap profiling infrastructure. Can be used either as a base + class, or as a mixin. + + The idea of this class is to have one ValueProf instance for many heap + storage cells that are likely to store the same content. An example is + having a ValueProf per field of a specific class. """ + _immutable_fields_ = ['_vprof_status?'] def __init__(self, msg=''): # only if you subclass normally self.init_valueprof(msg) + + # ________________________________________________________________________ + # abstract methods that need to be overridden: + + def is_int(self, w_obj): + """ returns whether the argument is a boxed integer. """ + raise NotImplementedError("abstract base") + + def get_int_val(self, w_obj): + """ w_obj must be a boxed integer. returns the unboxed value of that + integer. """ + raise NotImplementedError("abstract base") + + + # ________________________________________________________________________ + # public interface + def init_valueprof(self, msg=''): + """ initialize the profiler. must be called if ValueProf is used as a + mixin upon construction. """ self._vprof_status = SEEN_NOTHING self._vprof_value_int = 0 self._vprof_value_wref = dead_ref @@ -22,28 +48,6 @@ self._vprof_counter = 0 self._vprof_msg = msg - def is_int(self, w_obj): - raise NotImplementedError("abstract base") - - def get_int_val(self, w_obj): - raise NotImplementedError("abstract base") - - def write_necessary(self, w_value): - status = self._vprof_status - if status == SEEN_TOO_MUCH: - return True - # we must have seen something already, because it only makes sense to - # call write_necessary if there is already a value there - assert not status == SEEN_NOTHING - if status == SEEN_CONSTANT_INT: - return (self.is_int(w_value) and - self.read_constant_int() != self.get_int_val(w_value)) - elif status == SEEN_CONSTANT_OBJ: - prev_obj = self.try_read_constant_obj() - return prev_obj is not w_value - return True - - def see_write(self, w_value): """ inform the value profiler of a write.""" status = self._vprof_status @@ -93,26 +97,61 @@ self._vprof_status = SEEN_TOO_MUCH return + def write_necessary(self, w_value): + """ for an already initialized object check whether writing w_value + into the object is necessary. it is unnecessary if the profiler knows + the value is a constant and that constant is equal to w_value. """ + status = self._vprof_status + if status == SEEN_TOO_MUCH: + return True + # we must have seen something already, because it only makes sense to + # call write_necessary if there is already a value there + assert not status == SEEN_NOTHING + if status == SEEN_CONSTANT_INT: + return (self.is_int(w_value) and + self.read_constant_int() != self.get_int_val(w_value)) + elif status == SEEN_CONSTANT_OBJ: + prev_obj = self.try_read_constant_obj() + return prev_obj is not w_value + return True + def can_fold_read_int(self): + """ returns True if the heap profiler knows that the object stores a + constant integer. """ return self._vprof_status == SEEN_CONSTANT_INT def can_fold_read_obj(self): + """ returns True if the heap profiler knows that the object stores a + constant non-integer object. """ return self._vprof_status == SEEN_CONSTANT_OBJ def class_is_known(self): + """ returns True if the heap profiler knows the class of the stored + object. """ return self._vprof_status == SEEN_CONSTANT_CLASS @jit.elidable def read_constant_int(self): + """ returns the stored constant integer value in unboxed form. this + must only be called directly after having called + self.can_fold_read_int() and that returned True. """ assert self.can_fold_read_int() return self._vprof_value_int @jit.elidable def try_read_constant_obj(self): + """ tries to return the stored constant object. this must only be + called directly after having called self.can_fold_read_obj() and that + returned True. The method may still return False, if the constant + object was garbage collected in the meantime.""" assert self.can_fold_read_obj() return self._vprof_value_wref() @jit.elidable def read_constant_cls(self): + """ returns the class of the stored object. this must only be called + directly after having called self.class_is_known() and that returned + True. The returned class is typically used with + jit.record_exact_class(..., class)""" return self._vprof_const_cls From pypy.commits at gmail.com Thu Jan 21 14:54:14 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 11:54:14 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: move the valueprof to rlib Message-ID: <56a13766.552f1c0a.1091.ffffe302@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81893:cfa89ffc2f8a Date: 2016-01-21 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/cfa89ffc2f8a/ Log: move the valueprof to rlib diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -4,7 +4,7 @@ from rpython.rlib.rarithmetic import intmask, r_uint from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import valueprof +from rpython.rlib import valueprof from pypy.objspace.std.dictmultiobject import ( W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, BaseValueIterator, BaseItemIterator, _never_equal_to_string, diff --git a/pypy/interpreter/test/test_valueprof.py b/rpython/rlib/test/test_valueprof.py rename from pypy/interpreter/test/test_valueprof.py rename to rpython/rlib/test/test_valueprof.py --- a/pypy/interpreter/test/test_valueprof.py +++ b/rpython/rlib/test/test_valueprof.py @@ -1,4 +1,4 @@ -from pypy.interpreter.valueprof import * +from rpython.rlib.valueprof import * class Value(object): pass diff --git a/pypy/interpreter/valueprof.py b/rpython/rlib/valueprof.py rename from pypy/interpreter/valueprof.py rename to rpython/rlib/valueprof.py From pypy.commits at gmail.com Thu Jan 21 14:54:08 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 11:54:08 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: a test for lists of known types Message-ID: <56a13760.c6e01c0a.6e68b.6989@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81890:09164c7f58aa Date: 2016-01-21 13:58 +0100 http://bitbucket.org/pypy/pypy/changeset/09164c7f58aa/ Log: a test for lists of known types diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -269,3 +269,22 @@ loop, = log.loops_by_filename(self.filepath) opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + + def test_list_of_known_types(self): + def main(n): + l = [[]] * 1000 + i = 0 + while i < 1000: + # l[i] is not None is always True, because l[i] has known type + # W_ListObject + i += l[i] is not None # ID: typecheck + + log = self.run(main, [1000]) + loop, = log.loops_by_filename(self.filepath) + assert loop.match_by_id("typecheck", """ + i40 = uint_ge(i34, i29) + guard_false(i40, descr=...) + p41 = getarrayitem_gc_r(p31, i34, descr=) + i42 = int_add(i34, 1) + --TICK-- + """) From pypy.commits at gmail.com Thu Jan 21 14:54:15 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 11:54:15 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: rename valueprof to heapprof Message-ID: <56a13767.ccaa1c0a.cdb63.ffffedb5@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81894:b6d1c55ce142 Date: 2016-01-21 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/b6d1c55ce142/ Log: rename valueprof to heapprof diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -4,7 +4,7 @@ from rpython.rlib.rarithmetic import intmask, r_uint from pypy.interpreter.baseobjspace import W_Root -from rpython.rlib import valueprof +from rpython.rlib import heapprof from pypy.objspace.std.dictmultiobject import ( W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, BaseValueIterator, BaseItemIterator, _never_equal_to_string, @@ -335,7 +335,7 @@ class PlainAttribute(AbstractAttribute): _immutable_fields_ = ['name', 'index', 'storageindex', 'back', 'ever_mutated?', 'can_contain_mutable_cell?'] - objectmodel.import_from_mixin(valueprof.ValueProf) + objectmodel.import_from_mixin(heapprof.HeapProf) def __init__(self, name, index, back): AbstractAttribute.__init__(self, back.space, back.terminator) @@ -345,7 +345,7 @@ self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False - self.init_valueprof('%s.%s' % (back.terminator.w_cls.name if back.terminator.w_cls else '???', name)) + self.init_heapprof('%s.%s' % (back.terminator.w_cls.name if back.terminator.w_cls else '???', name)) # this flag means: at some point there was an instance that used a # derivative of this map that had a MutableCell stored into the # corresponding field. @@ -360,7 +360,7 @@ return self._read_cell(result) # ____________________________________________________________ - # methods for ValueProf mixin + # methods for HeapProf mixin def is_int(self, w_obj): from pypy.objspace.std.intobject import W_IntObject return type(w_obj) is W_IntObject diff --git a/rpython/rlib/valueprof.py b/rpython/rlib/heapprof.py rename from rpython/rlib/valueprof.py rename to rpython/rlib/heapprof.py --- a/rpython/rlib/valueprof.py +++ b/rpython/rlib/heapprof.py @@ -7,19 +7,19 @@ SEEN_CONSTANT_CLASS = 'c' SEEN_TOO_MUCH = '?' -class ValueProf(object): +class HeapProf(object): """ Some reusal heap profiling infrastructure. Can be used either as a base class, or as a mixin. - The idea of this class is to have one ValueProf instance for many heap + The idea of this class is to have one HeapProf instance for many heap storage cells that are likely to store the same content. An example is - having a ValueProf per field of a specific class. """ + having a HeapProf per field of a specific class. """ - _immutable_fields_ = ['_vprof_status?'] + _immutable_fields_ = ['_hprof_status?'] def __init__(self, msg=''): # only if you subclass normally - self.init_valueprof(msg) + self.init_heapprof(msg) # ________________________________________________________________________ @@ -38,70 +38,70 @@ # ________________________________________________________________________ # public interface - def init_valueprof(self, msg=''): - """ initialize the profiler. must be called if ValueProf is used as a + def init_heapprof(self, msg=''): + """ initialize the profiler. must be called if HeapProf is used as a mixin upon construction. """ - self._vprof_status = SEEN_NOTHING - self._vprof_value_int = 0 - self._vprof_value_wref = dead_ref - self._vprof_const_cls = None - self._vprof_counter = 0 - self._vprof_msg = msg + self._hprof_status = SEEN_NOTHING + self._hprof_value_int = 0 + self._hprof_value_wref = dead_ref + self._hprof_const_cls = None + self._hprof_counter = 0 + self._hprof_msg = msg def see_write(self, w_value): """ inform the value profiler of a write.""" - status = self._vprof_status + status = self._hprof_status if status == SEEN_TOO_MUCH: return if w_value is None: - self._vprof_status = SEEN_TOO_MUCH + self._hprof_status = SEEN_TOO_MUCH return if status == SEEN_NOTHING: if self.is_int(w_value): - self._vprof_value_int = self.get_int_val(w_value) - self._vprof_status = SEEN_CONSTANT_INT + self._hprof_value_int = self.get_int_val(w_value) + self._hprof_status = SEEN_CONSTANT_INT else: try: - self._vprof_value_wref = ref(w_value) + self._hprof_value_wref = ref(w_value) except TypeError: # for tests, which really use unwrapped ints in a few places - self._vprof_status = SEEN_TOO_MUCH + self._hprof_status = SEEN_TOO_MUCH else: - self._vprof_const_cls = w_value.__class__ - self._vprof_status = SEEN_CONSTANT_OBJ + self._hprof_const_cls = w_value.__class__ + self._hprof_status = SEEN_CONSTANT_OBJ elif status == SEEN_CONSTANT_INT: if self.is_int(w_value): if self.read_constant_int() != self.get_int_val(w_value): - self._vprof_status = SEEN_CONSTANT_CLASS - self._vprof_const_cls = w_value.__class__ + self._hprof_status = SEEN_CONSTANT_CLASS + self._hprof_const_cls = w_value.__class__ else: return else: - self._vprof_status = SEEN_TOO_MUCH + self._hprof_status = SEEN_TOO_MUCH elif status == SEEN_CONSTANT_OBJ: prev_obj = self.try_read_constant_obj() if prev_obj is not w_value: prev_cls = self.read_constant_cls() if prev_cls is w_value.__class__: - self._vprof_const_cls = prev_cls - self._vprof_status = SEEN_CONSTANT_CLASS + self._hprof_const_cls = prev_cls + self._hprof_status = SEEN_CONSTANT_CLASS else: - self._vprof_status = SEEN_TOO_MUCH + self._hprof_status = SEEN_TOO_MUCH else: return elif status == SEEN_CONSTANT_CLASS: cls = self.read_constant_cls() if cls is not w_value.__class__: - self._vprof_status = SEEN_TOO_MUCH + self._hprof_status = SEEN_TOO_MUCH return def write_necessary(self, w_value): """ for an already initialized object check whether writing w_value into the object is necessary. it is unnecessary if the profiler knows the value is a constant and that constant is equal to w_value. """ - status = self._vprof_status + status = self._hprof_status if status == SEEN_TOO_MUCH: return True # we must have seen something already, because it only makes sense to @@ -118,17 +118,17 @@ def can_fold_read_int(self): """ returns True if the heap profiler knows that the object stores a constant integer. """ - return self._vprof_status == SEEN_CONSTANT_INT + return self._hprof_status == SEEN_CONSTANT_INT def can_fold_read_obj(self): """ returns True if the heap profiler knows that the object stores a constant non-integer object. """ - return self._vprof_status == SEEN_CONSTANT_OBJ + return self._hprof_status == SEEN_CONSTANT_OBJ def class_is_known(self): """ returns True if the heap profiler knows the class of the stored object. """ - return self._vprof_status == SEEN_CONSTANT_CLASS + return self._hprof_status == SEEN_CONSTANT_CLASS @jit.elidable def read_constant_int(self): @@ -136,7 +136,7 @@ must only be called directly after having called self.can_fold_read_int() and that returned True. """ assert self.can_fold_read_int() - return self._vprof_value_int + return self._hprof_value_int @jit.elidable def try_read_constant_obj(self): @@ -145,7 +145,7 @@ returned True. The method may still return False, if the constant object was garbage collected in the meantime.""" assert self.can_fold_read_obj() - return self._vprof_value_wref() + return self._hprof_value_wref() @jit.elidable def read_constant_cls(self): @@ -153,5 +153,5 @@ directly after having called self.class_is_known() and that returned True. The returned class is typically used with jit.record_exact_class(..., class)""" - return self._vprof_const_cls + return self._hprof_const_cls diff --git a/rpython/rlib/test/test_valueprof.py b/rpython/rlib/test/test_heapprof.py rename from rpython/rlib/test/test_valueprof.py rename to rpython/rlib/test/test_heapprof.py --- a/rpython/rlib/test/test_valueprof.py +++ b/rpython/rlib/test/test_heapprof.py @@ -1,4 +1,4 @@ -from rpython.rlib.valueprof import * +from rpython.rlib.heapprof import * class Value(object): pass @@ -11,7 +11,7 @@ self.intval = val -class ValueProf(ValueProf): +class HeapProf(HeapProf): def is_int(self, val): return isinstance(val, ValueInt) @@ -20,113 +20,113 @@ def test_int(): - v = ValueProf() - assert v._vprof_status == SEEN_NOTHING + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING v.see_write(ValueInt(1)) v.see_write(ValueInt(1)) v.see_write(ValueInt(1)) v.see_write(ValueInt(1)) assert v.read_constant_int() == 1 - assert v._vprof_status == SEEN_CONSTANT_INT + assert v._hprof_status == SEEN_CONSTANT_INT v.see_write(ValueInt(2)) - assert v._vprof_status == SEEN_CONSTANT_CLASS - assert v._vprof_const_cls is ValueInt + assert v._hprof_status == SEEN_CONSTANT_CLASS + assert v._hprof_const_cls is ValueInt v.see_write(ValueInt(1)) - assert v._vprof_status == SEEN_CONSTANT_CLASS - assert v._vprof_const_cls is ValueInt + assert v._hprof_status == SEEN_CONSTANT_CLASS + assert v._hprof_const_cls is ValueInt v.see_write(ValueInt(2)) - assert v._vprof_status == SEEN_CONSTANT_CLASS - assert v._vprof_const_cls is ValueInt + assert v._hprof_status == SEEN_CONSTANT_CLASS + assert v._hprof_const_cls is ValueInt v.see_write(ValueInt(3)) - assert v._vprof_status == SEEN_CONSTANT_CLASS - assert v._vprof_const_cls is ValueInt + assert v._hprof_status == SEEN_CONSTANT_CLASS + assert v._hprof_const_cls is ValueInt - v = ValueProf() - assert v._vprof_status == SEEN_NOTHING + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING v.see_write(ValueInt(1)) v.see_write(Value()) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH v.see_write(Value()) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH def test_obj(): - v = ValueProf() + v = HeapProf() value = Value() - assert v._vprof_status == SEEN_NOTHING + assert v._hprof_status == SEEN_NOTHING v.see_write(value) v.see_write(value) v.see_write(value) v.see_write(value) assert v.try_read_constant_obj() is value - assert v._vprof_status == SEEN_CONSTANT_OBJ + assert v._hprof_status == SEEN_CONSTANT_OBJ v.see_write(ValueInt(2)) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH - v = ValueProf() - assert v._vprof_status == SEEN_NOTHING + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING v.see_write(Value()) v.see_write(OtherValue()) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH def test_none(): - v = ValueProf() - assert v._vprof_status == SEEN_NOTHING + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING v.see_write(None) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH v.see_write(None) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH - v = ValueProf() + v = HeapProf() v.see_write(ValueInt(1)) - assert v._vprof_status == SEEN_CONSTANT_INT + assert v._hprof_status == SEEN_CONSTANT_INT v.see_write(None) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH - v = ValueProf() + v = HeapProf() v.see_write(Value()) - assert v._vprof_status == SEEN_CONSTANT_OBJ + assert v._hprof_status == SEEN_CONSTANT_OBJ v.see_write(None) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH def test_known_class(): import gc - v = ValueProf() + v = HeapProf() value = Value() - assert v._vprof_status == SEEN_NOTHING + assert v._hprof_status == SEEN_NOTHING v.see_write(value) - assert v._vprof_status == SEEN_CONSTANT_OBJ + assert v._hprof_status == SEEN_CONSTANT_OBJ v.see_write(Value()) - assert v._vprof_status == SEEN_CONSTANT_CLASS + assert v._hprof_status == SEEN_CONSTANT_CLASS v.see_write(OtherValue()) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH - v = ValueProf() - assert v._vprof_status == SEEN_NOTHING + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING v.see_write(value) - assert v._vprof_status == SEEN_CONSTANT_OBJ + assert v._hprof_status == SEEN_CONSTANT_OBJ v.see_write(Value()) - assert v._vprof_status == SEEN_CONSTANT_CLASS + assert v._hprof_status == SEEN_CONSTANT_CLASS v.see_write(ValueInt(5)) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH - v = ValueProf() - assert v._vprof_status == SEEN_NOTHING + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING v.see_write(Value()) - assert v._vprof_status == SEEN_CONSTANT_OBJ + assert v._hprof_status == SEEN_CONSTANT_OBJ gc.collect() gc.collect() gc.collect() v.see_write(Value()) - assert v._vprof_status == SEEN_CONSTANT_CLASS + assert v._hprof_status == SEEN_CONSTANT_CLASS v.see_write(OtherValue()) - assert v._vprof_status == SEEN_TOO_MUCH + assert v._hprof_status == SEEN_TOO_MUCH def test_write_necessary_int(): - v = ValueProf() - assert v._vprof_status == SEEN_NOTHING + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING v.see_write(ValueInt(1)) res = v.write_necessary(ValueInt(1)) assert not res @@ -144,8 +144,8 @@ assert res def test_write_not_necessary_obj(): - v = ValueProf() - assert v._vprof_status == SEEN_NOTHING + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING val = Value() v.see_write(val) res = v.write_necessary(val) From pypy.commits at gmail.com Thu Jan 21 14:54:10 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 11:54:10 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: test write_necessary and make see_write return nothing Message-ID: <56a13762.cb571c0a.a54d.ffffe8a0@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81891:5812c89de335 Date: 2016-01-21 18:21 +0100 http://bitbucket.org/pypy/pypy/changeset/5812c89de335/ Log: test write_necessary and make see_write return nothing diff --git a/pypy/interpreter/test/test_valueprof.py b/pypy/interpreter/test/test_valueprof.py --- a/pypy/interpreter/test/test_valueprof.py +++ b/pypy/interpreter/test/test_valueprof.py @@ -124,37 +124,38 @@ v.see_write(OtherValue()) assert v._vprof_status == SEEN_TOO_MUCH -def test_write_not_necessary_int(): +def test_write_necessary_int(): v = ValueProf() assert v._vprof_status == SEEN_NOTHING - res = v.see_write(ValueInt(1)) + v.see_write(ValueInt(1)) + res = v.write_necessary(ValueInt(1)) assert not res - res = v.see_write(ValueInt(1)) - assert res - res = v.see_write(ValueInt(1)) + v.see_write(ValueInt(1)) + res = v.write_necessary(ValueInt(1)) + assert not res + res = v.see_write(ValueInt(2)) + res = v.write_necessary(ValueInt(1)) assert res res = v.see_write(ValueInt(2)) - assert not res - res = v.see_write(ValueInt(2)) - assert not res + res = v.write_necessary(ValueInt(1)) + assert res res = v.see_write(Value()) - assert not res + res = v.write_necessary(ValueInt(1)) + assert res def test_write_not_necessary_obj(): v = ValueProf() assert v._vprof_status == SEEN_NOTHING val = Value() - res = v.see_write(val) + v.see_write(val) + res = v.write_necessary(val) assert not res - res = v.see_write(val) + v.see_write(val) + res = v.write_necessary(val) + assert not res + v.see_write(ValueInt(1)) + res = v.write_necessary(ValueInt(1)) assert res - res = v.see_write(val) + v.see_write(Value()) + res = v.write_necessary(Value()) assert res - res = v.see_write(ValueInt(1)) - assert not res - res = v.see_write(ValueInt(2)) - assert not res - res = v.see_write(ValueInt(2)) - assert not res - res = v.see_write(Value()) - assert not res diff --git a/pypy/interpreter/valueprof.py b/pypy/interpreter/valueprof.py --- a/pypy/interpreter/valueprof.py +++ b/pypy/interpreter/valueprof.py @@ -45,17 +45,14 @@ def see_write(self, w_value): - """ inform the value profiler of a write. returns False, unless the - value is known to be a constant, and w_value that constant (in that - case the caller can elide the write to the actual object, if that - object already stores a value). """ + """ inform the value profiler of a write.""" status = self._vprof_status if status == SEEN_TOO_MUCH: - return False + return if w_value is None: self._vprof_status = SEEN_TOO_MUCH - return False + return if status == SEEN_NOTHING: if self.is_int(w_value): @@ -76,7 +73,7 @@ self._vprof_status = SEEN_CONSTANT_CLASS self._vprof_const_cls = w_value.__class__ else: - return True + return else: self._vprof_status = SEEN_TOO_MUCH elif status == SEEN_CONSTANT_OBJ: @@ -89,12 +86,12 @@ else: self._vprof_status = SEEN_TOO_MUCH else: - return True + return elif status == SEEN_CONSTANT_CLASS: cls = self.read_constant_cls() if cls is not w_value.__class__: self._vprof_status = SEEN_TOO_MUCH - return False + return def can_fold_read_int(self): return self._vprof_status == SEEN_CONSTANT_INT From pypy.commits at gmail.com Thu Jan 21 15:05:02 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 21 Jan 2016 12:05:02 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: excluding structs from buffer advance (which would be wrong) Message-ID: <56a139ee.05bd1c0a.49f0d.ffff863d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81895:60aa103a575b Date: 2016-01-21 21:04 +0100 http://bitbucket.org/pypy/pypy/changeset/60aa103a575b/ Log: excluding structs from buffer advance (which would be wrong) fixed wide_char test (tested little endian) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -506,7 +506,7 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) - if BIGENDIAN and result.shape.size < WORD: + if BIGENDIAN and result.shape.itemcode in ('c','h','i','C','H','I'): # we get a 8 byte value in big endian n = WORD - result.shape.size result.buffer_advance(n) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -896,11 +896,21 @@ b = _rawffi.Array('c').fromaddress(a.buffer, 38) if sys.maxunicode > 65535: # UCS4 build - assert b[0] == 'x' - assert b[1] == '\x00' - assert b[2] == '\x00' - assert b[3] == '\x00' - assert b[4] == 'y' + if sys.byteorder == 'big': + assert b[0] == '\x00' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == 'x' + assert b[4] == '\x00' + assert b[5] == '\x00' + assert b[6] == '\x00' + assert b[7] == 'y' + else: + assert b[0] == 'x' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == '\x00' + assert b[4] == 'y' else: # UCS2 build assert b[0] == 'x' From pypy.commits at gmail.com Thu Jan 21 15:50:12 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 21 Jan 2016 12:50:12 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: fix translation by killing dead import Message-ID: <56a14484.6918c20a.261a0.2089@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81896:cf041c3153dd Date: 2016-01-21 21:49 +0100 http://bitbucket.org/pypy/pypy/changeset/cf041c3153dd/ Log: fix translation by killing dead import diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -6,7 +6,7 @@ import dis, imp, struct, types, new, sys, os -from pypy.interpreter import eval, valueprof +from pypy.interpreter import eval from pypy.interpreter.signature import Signature from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec From pypy.commits at gmail.com Thu Jan 21 16:05:56 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 21 Jan 2016 13:05:56 -0800 (PST) Subject: [pypy-commit] pypy default: Merge remove-getfield-pure Message-ID: <56a14834.4f911c0a.60406.0539@mx.google.com> Author: Spenser Bauman Branch: Changeset: r81897:381c403794db Date: 2016-01-21 14:45 -0500 http://bitbucket.org/pypy/pypy/changeset/381c403794db/ Log: Merge remove-getfield-pure diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -83,9 +83,9 @@ p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() - p41 = getfield_gc_pure_r(p38, descr=) + p41 = getfield_gc_r(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) - i42 = getfield_gc_pure_i(p38, descr=) + i42 = getfield_gc_i(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -435,21 +435,21 @@ guard_isnull(p5, descr=...) guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) - i22 = getfield_gc_pure_i(p12, descr=) + i22 = getfield_gc_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() - p32 = getfield_gc_pure_r(p29, descr=) + p32 = getfield_gc_r(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc_pure_i(p29, descr=) + i34 = getfield_gc_i(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) p37 = getfield_gc_r(ConstPtr(ptr36), descr=) guard_nonnull_class(p37, ConstClass(W_IntObject), descr=...) - i39 = getfield_gc_pure_i(p37, descr=) + i39 = getfield_gc_i(p37, descr=) i40 = int_add_ovf(i22, i39) guard_no_overflow(descr=...) --TICK-- @@ -466,7 +466,7 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure_i(p6, descr=) + i8 = getfield_gc_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) guard_not_invalidated? diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -84,7 +84,7 @@ guard_no_exception(descr=...) p20 = new_with_vtable(descr=...) call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) @@ -93,7 +93,7 @@ p28 = getfield_gc_r(p13, descr=) p29 = getinteriorfield_gc_r(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure_i(p29, descr=) + i31 = getfield_gc_i(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -101,13 +101,13 @@ loop = log._filter(log.loops[0]) assert loop.match(""" guard_class(p1, #, descr=...) - p4 = getfield_gc_pure_r(p1, descr=) + p4 = getfield_gc_r(p1, descr=) i5 = getfield_gc_i(p0, descr=) - p6 = getfield_gc_pure_r(p4, descr=) - p7 = getfield_gc_pure_r(p6, descr=) + p6 = getfield_gc_r(p4, descr=) + p7 = getfield_gc_r(p6, descr=) guard_class(p7, ConstClass(Float64), descr=...) - i9 = getfield_gc_pure_i(p4, descr=) - i10 = getfield_gc_pure_i(p6, descr=) + i9 = getfield_gc_i(p4, descr=) + i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) i14 = int_eq(i10, 60) i15 = int_or(i12, i14) @@ -117,28 +117,28 @@ i18 = float_ne(f16, 0.000000) guard_true(i18, descr=...) guard_nonnull_class(p2, ConstClass(W_BoolBox), descr=...) - i20 = getfield_gc_pure_i(p2, descr=) + i20 = getfield_gc_i(p2, descr=) i21 = int_is_true(i20) guard_false(i21, descr=...) i22 = getfield_gc_i(p0, descr=) - i23 = getfield_gc_pure_i(p1, descr=) + i23 = getfield_gc_i(p1, descr=) guard_true(i23, descr=...) i25 = int_add(i22, 1) - p26 = getfield_gc_pure_r(p0, descr=) - i27 = getfield_gc_pure_i(p1, descr=) + p26 = getfield_gc_r(p0, descr=) + i27 = getfield_gc_i(p1, descr=) i28 = int_is_true(i27) guard_true(i28, descr=...) - i29 = getfield_gc_pure_i(p6, descr=) + i29 = getfield_gc_i(p6, descr=) guard_value(i29, 8, descr=...) i30 = int_add(i5, 8) - i31 = getfield_gc_pure_i(p1, descr=) + i31 = getfield_gc_i(p1, descr=) i32 = int_ge(i25, i31) guard_false(i32, descr=...) p34 = new_with_vtable(descr=...) {{{ - setfield_gc(p34, p1, descr=) + setfield_gc(p34, p1, descr=) setfield_gc(p34, i25, descr=) - setfield_gc(p34, p26, descr=) + setfield_gc(p34, p26, descr=) setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -54,7 +54,7 @@ i19 = int_add(i11, 1) setfield_gc(p2, i19, descr=...) guard_nonnull_class(p18, ConstClass(W_IntObject), descr=...) - i20 = getfield_gc_pure_i(p18, descr=...) + i20 = getfield_gc_i(p18, descr=...) i21 = int_gt(i20, i14) guard_true(i21, descr=...) jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -113,7 +113,7 @@ i12 = int_is_true(i4) guard_true(i12, descr=...) guard_not_invalidated(descr=...) - i10p = getfield_gc_pure_i(p10, descr=...) + i10p = getfield_gc_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -82,7 +82,7 @@ strsetitem(p25, 0, i23) p93 = call_r(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) - i95 = getfield_gc_pure_i(p93, descr=) + i95 = getfield_gc_i(p93, descr=) i96 = int_gt(i95, #) guard_false(i96, descr=...) i94 = call_i(ConstClass(rbigint._toint_helper), p93, descr=) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -152,7 +152,7 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) self.index = heaptracker.get_fielddescr_index_in(S, fieldname) - self._is_pure = S._immutable_field(fieldname) + self._is_pure = S._immutable_field(fieldname) != False def is_always_pure(self): return self._is_pure @@ -608,9 +608,6 @@ p = support.cast_arg(lltype.Ptr(descr.S), p) return support.cast_result(descr.FIELD, getattr(p, descr.fieldname)) - bh_getfield_gc_pure_i = bh_getfield_gc - bh_getfield_gc_pure_r = bh_getfield_gc - bh_getfield_gc_pure_f = bh_getfield_gc bh_getfield_gc_i = bh_getfield_gc bh_getfield_gc_r = bh_getfield_gc bh_getfield_gc_f = bh_getfield_gc diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -180,7 +180,8 @@ return self.offset def repr_of_descr(self): - return '' % (self.flag, self.name, self.offset) + ispure = " pure" if self._is_pure else "" + return '' % (self.flag, self.name, self.offset, ispure) def get_parent_descr(self): return self.parent_descr @@ -200,7 +201,7 @@ flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) index_in_parent = heaptracker.get_fielddescr_index_in(STRUCT, fieldname) - is_pure = bool(STRUCT._immutable_field(fieldname)) + is_pure = STRUCT._immutable_field(fieldname) != False fielddescr = FieldDescr(name, offset, size, flag, index_in_parent, is_pure) cachedict = cache.setdefault(STRUCT, {}) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1477,9 +1477,6 @@ genop_getfield_gc_f = _genop_getfield genop_getfield_raw_i = _genop_getfield genop_getfield_raw_f = _genop_getfield - genop_getfield_gc_pure_i = _genop_getfield - genop_getfield_gc_pure_r = _genop_getfield - genop_getfield_gc_pure_f = _genop_getfield def _genop_gc_load(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc, sign_loc = arglocs diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -168,9 +168,6 @@ elif (opnum != rop.GETFIELD_GC_R and opnum != rop.GETFIELD_GC_I and opnum != rop.GETFIELD_GC_F and - opnum != rop.GETFIELD_GC_PURE_R and - opnum != rop.GETFIELD_GC_PURE_I and - opnum != rop.GETFIELD_GC_PURE_F and opnum != rop.PTR_EQ and opnum != rop.PTR_NE and opnum != rop.INSTANCE_PTR_EQ and diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -816,9 +816,6 @@ if 'getfield_gc' in check: assert check.pop('getfield_gc') == 0 check['getfield_gc_i'] = check['getfield_gc_r'] = check['getfield_gc_f'] = 0 - if 'getfield_gc_pure' in check: - assert check.pop('getfield_gc_pure') == 0 - check['getfield_gc_pure_i'] = check['getfield_gc_pure_r'] = check['getfield_gc_pure_f'] = 0 if 'getarrayitem_gc_pure' in check: assert check.pop('getarrayitem_gc_pure') == 0 check['getarrayitem_gc_pure_i'] = check['getarrayitem_gc_pure_r'] = check['getarrayitem_gc_pure_f'] = 0 diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -183,6 +183,8 @@ return res def invalidate(self, descr): + if descr.is_always_pure(): + return for opinfo in self.cached_infos: assert isinstance(opinfo, info.AbstractStructPtrInfo) opinfo._fields[descr.get_index()] = None @@ -515,9 +517,14 @@ return pendingfields def optimize_GETFIELD_GC_I(self, op): + descr = op.getdescr() + if descr.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None: + resbox = self.optimizer.constant_fold(op) + self.optimizer.make_constant(op, resbox) + return structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) + cf = self.field_cache(descr) + field = cf.getfield_from_cache(self, structinfo, descr) if field is not None: self.make_equal_to(op, field) return @@ -525,23 +532,10 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) + structinfo.setfield(descr, op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I - def optimize_SETFIELD_GC(self, op): self.setfield(op) #opnum = OpHelpers.getfield_pure_for_descr(op.getdescr()) @@ -631,12 +625,12 @@ def optimize_QUASIIMMUT_FIELD(self, op): # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC_PURE(s, descr='inst_x') + # x = GETFIELD_GC(s, descr='inst_x') # pure # If 's' is a constant (after optimizations) we rely on the rest of the - # optimizations to constant-fold the following getfield_gc_pure. + # optimizations to constant-fold the following pure getfield_gc. # in addition, we record the dependency here to make invalidation work # correctly. - # NB: emitting the GETFIELD_GC_PURE is only safe because the + # NB: emitting the pure GETFIELD_GC is only safe because the # QUASIIMMUT_FIELD is also emitted to make sure the dependency is # registered. structvalue = self.ensure_ptr_info_arg0(op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -10,6 +10,7 @@ from rpython.jit.metainterp.typesystem import llhelper from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.debug import debug_print +from rpython.jit.metainterp.optimize import SpeculativeError @@ -374,6 +375,7 @@ if (box.type == 'i' and box.get_forwarded() and box.get_forwarded().is_constant()): return ConstInt(box.get_forwarded().getint()) + return None #self.ensure_imported(value) def get_newoperations(self): @@ -736,12 +738,64 @@ self.emit_operation(op) def constant_fold(self, op): + self.protect_speculative_operation(op) argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] return execute_nonspec_const(self.cpu, None, op.getopnum(), argboxes, op.getdescr(), op.type) + def protect_speculative_operation(self, op): + """When constant-folding a pure operation that reads memory from + a gcref, make sure that the gcref is non-null and of a valid type. + Otherwise, raise SpeculativeError. This should only occur when + unrolling and optimizing the unrolled loop. Note that if + cpu.supports_guard_gc_type is false, we can't really do this + check at all, but then we don't unroll in that case. + """ + opnum = op.getopnum() + cpu = self.cpu + + if OpHelpers.is_pure_getfield(opnum, op.getdescr()): + fielddescr = op.getdescr() + ref = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_field(ref, fielddescr) + return + + elif (opnum == rop.GETARRAYITEM_GC_PURE_I or + opnum == rop.GETARRAYITEM_GC_PURE_R or + opnum == rop.GETARRAYITEM_GC_PURE_F or + opnum == rop.ARRAYLEN_GC): + arraydescr = op.getdescr() + array = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_array(array, arraydescr) + if opnum == rop.ARRAYLEN_GC: + return + arraylength = cpu.bh_arraylen_gc(array, arraydescr) + + elif (opnum == rop.STRGETITEM or + opnum == rop.STRLEN): + string = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_string(string) + if opnum == rop.STRLEN: + return + arraylength = cpu.bh_strlen(string) + + elif (opnum == rop.UNICODEGETITEM or + opnum == rop.UNICODELEN): + unicode = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_unicode(unicode) + if opnum == rop.UNICODELEN: + return + arraylength = cpu.bh_unicodelen(unicode) + + else: + return + + index = self.get_constant_box(op.getarg(1)).getint() + if not (0 <= index < arraylength): + raise SpeculativeError + def is_virtual(self, op): if op.type == 'r': opinfo = self.getptrinfo(op) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -94,7 +94,6 @@ break else: # all constant arguments: constant-fold away - self.protect_speculative_operation(op) resbox = self.optimizer.constant_fold(op) # note that INT_xxx_OVF is not done from here, and the # overflows in the INT_xxx operations are ignored @@ -119,59 +118,6 @@ if nextop: self.emit_operation(nextop) - def protect_speculative_operation(self, op): - """When constant-folding a pure operation that reads memory from - a gcref, make sure that the gcref is non-null and of a valid type. - Otherwise, raise SpeculativeError. This should only occur when - unrolling and optimizing the unrolled loop. Note that if - cpu.supports_guard_gc_type is false, we can't really do this - check at all, but then we don't unroll in that case. - """ - opnum = op.getopnum() - cpu = self.optimizer.cpu - - if (opnum == rop.GETFIELD_GC_PURE_I or - opnum == rop.GETFIELD_GC_PURE_R or - opnum == rop.GETFIELD_GC_PURE_F): - fielddescr = op.getdescr() - ref = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_field(ref, fielddescr) - return - - elif (opnum == rop.GETARRAYITEM_GC_PURE_I or - opnum == rop.GETARRAYITEM_GC_PURE_R or - opnum == rop.GETARRAYITEM_GC_PURE_F or - opnum == rop.ARRAYLEN_GC): - arraydescr = op.getdescr() - array = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_array(array, arraydescr) - if opnum == rop.ARRAYLEN_GC: - return - arraylength = cpu.bh_arraylen_gc(array, arraydescr) - - elif (opnum == rop.STRGETITEM or - opnum == rop.STRLEN): - string = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_string(string) - if opnum == rop.STRLEN: - return - arraylength = cpu.bh_strlen(string) - - elif (opnum == rop.UNICODEGETITEM or - opnum == rop.UNICODELEN): - unicode = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_unicode(unicode) - if opnum == rop.UNICODELEN: - return - arraylength = cpu.bh_unicodelen(unicode) - - else: - return - - index = self.get_constant_box(op.getarg(1)).getint() - if not (0 <= index < arraylength): - raise SpeculativeError - def getrecentops(self, opnum): if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: opnum = opnum - rop._OVF_FIRST diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -521,8 +521,8 @@ def test_getfield(self): graph = self.build_dependency(""" [p0, p1] # 0: 1,2,5 - p2 = getfield_gc_r(p0) # 1: 3,5 - p3 = getfield_gc_r(p0) # 2: 4 + p2 = getfield_gc_r(p0, descr=valuedescr) # 1: 3,5 + p3 = getfield_gc_r(p0, descr=valuedescr) # 2: 4 guard_nonnull(p2) [p2] # 3: 4,5 guard_nonnull(p3) [p3] # 4: 5 jump(p0,p2) # 5: @@ -532,10 +532,10 @@ def test_cyclic(self): graph = self.build_dependency(""" [p0, p1, p5, p6, p7, p9, p11, p12] # 0: 1,6 - p13 = getfield_gc_r(p9) # 1: 2,5 + p13 = getfield_gc_r(p9, descr=valuedescr) # 1: 2,5 guard_nonnull(p13) [] # 2: 4,5 - i14 = getfield_gc_i(p9) # 3: 5 - p15 = getfield_gc_r(p13) # 4: 5 + i14 = getfield_gc_i(p9, descr=valuedescr) # 3: 5 + p15 = getfield_gc_r(p13, descr=valuedescr) # 4: 5 guard_class(p15, 14073732) [p1, p0, p9, i14, p15, p13, p5, p6, p7] # 5: 6 jump(p0,p1,p5,p6,p7,p9,p11,p12) # 6: """) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -955,12 +955,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_getfield_gc_1(self): ops = """ [i] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + p1 = new_with_vtable(descr=nodesize3) + setfield_gc(p1, i, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) jump(i1) """ expected = """ @@ -969,17 +969,16 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ [i] - jump(5) - """ - self.node.value = 5 + jump(7) + """ self.optimize_loop(ops, expected) def test_getfield_gc_nonpure_2(self): @@ -1343,7 +1342,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -1355,7 +1354,7 @@ expected = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -1597,7 +1596,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -1608,7 +1607,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -5076,7 +5075,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i0 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i0 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i1 = call_pure_i(123, i0, descr=nonwritedescr) finish(i1) """ @@ -5462,15 +5461,15 @@ def test_getarrayitem_gc_pure_not_invalidated(self): ops = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) - i2 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i2 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(i2) jump(p0) """ expected = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) escape_n(i1) jump(p0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1409,12 +1409,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_pure_getfield_gc_1(self): ops = """ [i] p1 = new_with_vtable(descr=nodesize) setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + i1 = getfield_gc_i(p1, descr=valuedescr) jump(i1) """ expected = """ @@ -1423,10 +1423,10 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_pure_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ @@ -1436,20 +1436,20 @@ self.node.value = 5 self.optimize_loop(ops, expected) - def test_getfield_gc_pure_3(self): + def test_pure_getfield_gc_3(self): ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) - p3 = getfield_gc_pure_r(p1, descr=nextdescr) + p3 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p3) jump() """ expected = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) escape_n(p2) jump() @@ -2319,7 +2319,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -2332,7 +2332,7 @@ preamble = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2340,11 +2340,12 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - jump(p1, i1, i2, p3, i3) - """ - expected = """ - [p1, i1, i2, p3, i3] + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] # + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2352,8 +2353,7 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - ifoo = arraylen_gc(p3, descr=arraydescr) # killed by the backend - jump(p1, i1, i2, p3, i3) + jump(p1, i1, i2, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2669,7 +2669,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -2680,7 +2680,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -3302,8 +3302,8 @@ [p8, p11, i24] p26 = new(descr=ssize) setfield_gc(p26, i24, descr=adescr) - i34 = getfield_gc_pure_i(p11, descr=abisdescr) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=abisdescr) + i35 = getfield_gc_i(p26, descr=adescr) i36 = int_add_ovf(i34, i35) guard_no_overflow() [] jump(p8, p11, i35) @@ -3330,8 +3330,8 @@ setfield_gc(p26, i24, descr=adescr) i28 = int_add(i17, 1) setfield_gc(p8, i28, descr=valuedescr) - i34 = getfield_gc_pure_i(p11, descr=valuedescr3) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=valuedescr3) + i35 = getfield_gc_i(p26, descr=adescr) guard_nonnull(p12) [] i36 = int_add_ovf(i34, i35) guard_no_overflow() [] @@ -3522,14 +3522,14 @@ def test_residual_call_does_not_invalidate_immutable_caches(self): ops = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) - i3 = getfield_gc_pure_i(p1, descr=valuedescr3) + i3 = getfield_gc_i(p1, descr=valuedescr3) jump(p1) """ expected_preamble = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) jump(p1, i1) """ @@ -4878,11 +4878,11 @@ def test_add_sub_ovf_virtual_unroll(self): ops = """ [p15] - i886 = getfield_gc_pure_i(p15, descr=valuedescr) + i886 = getfield_gc_i(p15, descr=valuedescr) i888 = int_sub_ovf(i886, 1) guard_no_overflow() [] escape_n(i888) - i4360 = getfield_gc_pure_i(p15, descr=valuedescr) + i4360 = getfield_gc_i(p15, descr=valuedescr) i4362 = int_add_ovf(i4360, 1) guard_no_overflow() [] i4360p = int_sub_ovf(i4362, 1) @@ -4972,18 +4972,16 @@ def test_pure(self): ops = """ [p42] - p53 = getfield_gc_r(ConstPtr(myptr), descr=nextdescr) - p59 = getfield_gc_pure_r(p53, descr=valuedescr) + p53 = getfield_gc_r(ConstPtr(myptr3), descr=nextdescr3) + p59 = getfield_gc_r(p53, descr=valuedescr3) i61 = call_i(1, p59, descr=nonwritedescr) jump(p42) """ expected = """ - [p42, p59] - i61 = call_i(1, p59, descr=nonwritedescr) - jump(p42, p59) - - """ - self.node.value = 5 + [p42] + i61 = call_i(1, 7, descr=nonwritedescr) + jump(p42) + """ self.optimize_loop(ops, expected) def test_complains_getfieldpure_setfield(self): @@ -4992,7 +4990,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) setfield_gc(p1, p3, descr=nextdescr) jump(p3) """ @@ -5002,7 +5000,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5010,7 +5008,7 @@ expected = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5021,7 +5019,7 @@ ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) p3 = escape_r() setfield_gc(p3, p1, descr=nextdescr) jump() @@ -6167,14 +6165,14 @@ def test_bug_unroll_with_immutables(self): ops = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) p1 = new_with_vtable(descr=immut_descr) setfield_gc(p1, 1242, descr=immut_intval) jump(p1) """ preamble = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) jump() """ expected = """ @@ -7229,13 +7227,13 @@ [p0, p1, i0] quasiimmut_field(p0, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ expected = """ [p0, p1, i0] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ @@ -7246,7 +7244,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) escape_n(i1) jump() """ @@ -7298,11 +7296,11 @@ [i0a, i0b] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i2 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7325,11 +7323,11 @@ setfield_gc(p, 421, descr=quasifielddescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p, descr=quasifielddescr) + i1 = getfield_gc_i(p, descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(p, descr=quasifielddescr) + i2 = getfield_gc_i(p, descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7568,7 +7566,7 @@ def test_forced_virtual_pure_getfield(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=valuedescr) + p1 = getfield_gc_r(p0, descr=valuedescr3) jump(p1) """ self.optimize_loop(ops, ops) @@ -7578,7 +7576,7 @@ p1 = new_with_vtable(descr=nodesize3) setfield_gc(p1, p0, descr=valuedescr3) escape_n(p1) - p2 = getfield_gc_pure_r(p1, descr=valuedescr3) + p2 = getfield_gc_r(p1, descr=valuedescr3) escape_n(p2) jump(p0) """ @@ -7852,14 +7850,14 @@ def test_loopinvariant_getarrayitem_gc_pure(self): ops = """ [p9, i1] - i843 = getarrayitem_gc_pure_i(p9, i1, descr=arraydescr) + i843 = getarrayitem_gc_pure_i(p9, i1, descr=arrayimmutdescr) call_n(i843, descr=nonwritedescr) jump(p9, i1) """ expected = """ [p9, i1, i843] call_n(i843, descr=nonwritedescr) - ifoo = arraylen_gc(p9, descr=arraydescr) + ifoo = arraylen_gc(p9, descr=arrayimmutdescr) jump(p9, i1, i843) """ self.optimize_loop(ops, expected) @@ -7868,7 +7866,7 @@ ops = """ [p0] p1 = getfield_gc_r(p0, descr=nextdescr) - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) call_n(p2, descr=nonwritedescr) jump(p0) """ @@ -7883,14 +7881,14 @@ i1 = arraylen_gc(p1, descr=gcarraydescr) i2 = int_ge(i1, 8) guard_true(i2) [] - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) - jump(p2, p1) - """ - expected = """ - [p0, p2, p1] + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) + jump(p1, p2) + """ + expected = """ + [p0, p1, p2] call_n(p2, descr=nonwritedescr) i3 = arraylen_gc(p1, descr=gcarraydescr) # Should be killed by backend - jump(p0, p2, p1) + jump(p0, p1, p2) """ self.optimize_loop(ops, expected, expected_short=short) @@ -8065,7 +8063,7 @@ def test_dont_mixup_equal_boxes(self): ops = """ [p8] - i9 = getfield_gc_pure_i(p8, descr=valuedescr) + i9 = getfield_gc_i(p8, descr=valuedescr3) i10 = int_gt(i9, 0) guard_true(i10) [] i29 = int_lshift(i9, 1) @@ -8160,9 +8158,9 @@ py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") ops = """ [p5, p8] - i9 = getfield_gc_pure_i(p5, descr=valuedescr) + i9 = getfield_gc_i(p5, descr=valuedescr) call_n(i9, descr=nonwritedescr) - i11 = getfield_gc_pure_i(p8, descr=valuedescr) + i11 = getfield_gc_i(p8, descr=valuedescr) i13 = int_add_ovf(i11, 1) guard_no_overflow() [] p22 = new_with_vtable(descr=nodesize) @@ -8201,14 +8199,14 @@ ops = """ [p0] p10 = getfield_gc_r(ConstPtr(myptr), descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(ConstPtr(myptr), ConstPtr(myptr2), descr=otherdescr) + setfield_gc(ConstPtr(myptr), ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8232,14 +8230,14 @@ ops = """ [p0] p10 = getfield_gc_r(p0, descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(p0, ConstPtr(myptr2), descr=otherdescr) + setfield_gc(p0, ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8624,17 +8622,17 @@ [p10] p52 = getfield_gc_r(p10, descr=nextdescr) # inst_storage p54 = getarrayitem_gc_r(p52, 0, descr=arraydescr) - p69 = getfield_gc_pure_r(p54, descr=otherdescr) # inst_w_function + p69 = getfield_gc_r(p54, descr=otherdescr) # inst_w_function quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] p106 = new_with_vtable(descr=nodesize) p108 = new_array(3, descr=arraydescr) p110 = new_with_vtable(descr=nodesize) - setfield_gc(p110, ConstPtr(myptr2), descr=otherdescr) # inst_w_function + setfield_gc(p110, ConstPtr(myptrb), descr=otherdescr) # inst_w_function setarrayitem_gc(p108, 0, p110, descr=arraydescr) setfield_gc(p106, p108, descr=nextdescr) # inst_storage jump(p106) @@ -8650,7 +8648,7 @@ [p69] quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] jump(ConstPtr(myptr)) """ @@ -8852,13 +8850,13 @@ def test_virtual_back_and_forth(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=bdescr) + p1 = getfield_gc_r(p0, descr=nextdescr3) ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) - ix = getarrayitem_gc_pure_i(p2, 0, descr=arraydescr) + ix = getarrayitem_gc_pure_i(p2, 0, descr=arrayimmutdescr) pfoo = getfield_gc_r(ptemp, descr=nextdescr) - guard_value(pfoo, ConstPtr(myarray)) [] + guard_value(pfoo, ConstPtr(immutarray)) [] ifoo = int_add(ix, 13) escape_n(ix) jump(p0) @@ -8888,13 +8886,13 @@ def test_constant_float_pure(self): ops = """ [p0] - f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarraydescr) + f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarrayimmutdescr) guard_value(f0, 1.03) [] jump(p0) """ expected = """ [p0] - ifoo = arraylen_gc(p0, descr=floatarraydescr) + ifoo = arraylen_gc(p0, descr=floatarrayimmutdescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -9102,7 +9100,7 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getfield_gc_pure_i(p0, descr=valuedescr) + getfield_gc_i(p0, descr=valuedescr3) i3 = int_sub(i1, 1) jump(NULL, i3) """ @@ -9113,9 +9111,9 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getfield_gc_pure_i(p0, descr=valuedescr) + getfield_gc_i(p0, descr=valuedescr3) i3 = int_sub(i1, 1) - jump(ConstPtr(myptr4), i3) + jump(ConstPtr(myptr2), i3) """ py.test.raises(InvalidLoop, self.optimize_loop, ops, ops) @@ -9265,9 +9263,126 @@ guard_value(i1, 5) [] jump() """ - a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE)), 5, zero=True) + a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE3)), 5, zero=True) self.optimize_loop(ops, expected, jump_values=[a]) + def test_large_number_of_immutable_references(self): + ops = """ + [p0] + i0 = getfield_gc_i(p0, descr=bigadescr) + i1 = getfield_gc_i(p0, descr=bigbdescr) + i2 = getfield_gc_i(p0, descr=bigcdescr) + i3 = getfield_gc_i(p0, descr=bigddescr) + i4 = getfield_gc_i(p0, descr=bigedescr) + i5 = getfield_gc_i(p0, descr=bigfdescr) + i6 = getfield_gc_i(p0, descr=biggdescr) + i7 = getfield_gc_i(p0, descr=bighdescr) + i8 = getfield_gc_i(p0, descr=bigidescr) + i9 = getfield_gc_i(p0, descr=bigjdescr) + i10 = getfield_gc_i(p0, descr=bigkdescr) + i11 = getfield_gc_i(p0, descr=bigldescr) + i12 = getfield_gc_i(p0, descr=bigmdescr) + i13 = getfield_gc_i(p0, descr=bigndescr) + i14 = getfield_gc_i(p0, descr=bigodescr) + i15 = getfield_gc_i(p0, descr=bigpdescr) + i16 = getfield_gc_i(p0, descr=bigqdescr) + i17 = getfield_gc_i(p0, descr=bigrdescr) + i18 = getfield_gc_i(p0, descr=bigsdescr) + i19 = getfield_gc_i(p0, descr=bigtdescr) + i20 = getfield_gc_i(p0, descr=bigudescr) + i21 = getfield_gc_i(p0, descr=bigvdescr) + i22 = getfield_gc_i(p0, descr=bigwdescr) + i23 = getfield_gc_i(p0, descr=bigxdescr) + i24 = getfield_gc_i(p0, descr=bigydescr) + i25 = getfield_gc_i(p0, descr=bigzdescr) + i27 = getfield_gc_i(p0, descr=bigbdescr) + i28 = getfield_gc_i(p0, descr=bigcdescr) + i29 = getfield_gc_i(p0, descr=bigddescr) + i30 = getfield_gc_i(p0, descr=bigedescr) + i31 = getfield_gc_i(p0, descr=bigfdescr) + i32 = getfield_gc_i(p0, descr=biggdescr) + i33 = getfield_gc_i(p0, descr=bighdescr) + i34 = getfield_gc_i(p0, descr=bigidescr) + i35 = getfield_gc_i(p0, descr=bigjdescr) + i36 = getfield_gc_i(p0, descr=bigkdescr) + i37 = getfield_gc_i(p0, descr=bigldescr) + i38 = getfield_gc_i(p0, descr=bigmdescr) + i39 = getfield_gc_i(p0, descr=bigndescr) + i40 = getfield_gc_i(p0, descr=bigodescr) + i41 = getfield_gc_i(p0, descr=bigpdescr) + i42 = getfield_gc_i(p0, descr=bigqdescr) + i43 = getfield_gc_i(p0, descr=bigrdescr) + i44 = getfield_gc_i(p0, descr=bigsdescr) + i45 = getfield_gc_i(p0, descr=bigtdescr) + i46 = getfield_gc_i(p0, descr=bigudescr) + i47 = getfield_gc_i(p0, descr=bigvdescr) + i48 = getfield_gc_i(p0, descr=bigwdescr) + i49 = getfield_gc_i(p0, descr=bigxdescr) + i50 = getfield_gc_i(p0, descr=bigydescr) + i51 = getfield_gc_i(p0, descr=bigzdescr) + i26 = getfield_gc_i(p0, descr=bigadescr) + i99 = int_add(i26, i51) + escape_i(i27) + escape_i(i28) + escape_i(i29) + escape_i(i30) + escape_i(i31) + escape_i(i32) + escape_i(i33) + escape_i(i34) + escape_i(i35) + escape_i(i36) + escape_i(i37) + escape_i(i38) + escape_i(i39) + escape_i(i40) + escape_i(i41) + escape_i(i42) + escape_i(i43) + escape_i(i44) + escape_i(i45) + escape_i(i46) + escape_i(i47) + escape_i(i48) + escape_i(i49) + escape_i(i50) + escape_i(i51) + escape_i(i26) + escape_i(i99) + jump(p0) + """ + expected = """ + [p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99] + escape_i(i1) + escape_i(i2) + escape_i(i3) + escape_i(i4) + escape_i(i5) + escape_i(i6) + escape_i(i7) + escape_i(i8) + escape_i(i9) + escape_i(i10) + escape_i(i11) + escape_i(i12) + escape_i(i13) + escape_i(i14) + escape_i(i15) + escape_i(i16) + escape_i(i17) + escape_i(i18) + escape_i(i19) + escape_i(i20) + escape_i(i21) + escape_i(i22) + escape_i(i23) + escape_i(i24) + escape_i(i25) + escape_i(i0) + escape_i(i99) + jump(p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99) + """ + self.optimize_loop(ops, expected) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -220,16 +220,16 @@ def test_double_getfield_plus_pure(self): loop = """ [p0] - pc = getfield_gc_pure_r(p0, descr=nextdescr) + pc = getfield_gc_r(p0, descr=nextdescr3) escape_n(p0) # that should flush the caches - p1 = getfield_gc_r(pc, descr=nextdescr) - i0 = getfield_gc_i(p1, descr=valuedescr) + p1 = getfield_gc_r(pc, descr=nextdescr3) + i0 = getfield_gc_i(p1, descr=valuedescr3) jump(p0) """ es, loop, preamble = self.optimize(loop) assert len(es.short_boxes) == 4 # both getfields are available as - # well as getfield_gc_pure + # well as getfield_gc def test_p123_anti_nested(self): loop = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,4 +1,4 @@ -import py, random +import py, random, string from rpython.rlib.debug import debug_print from rpython.rtyper.lltypesystem import lltype, llmemory, rffi @@ -122,7 +122,14 @@ ('value', lltype.Signed), ('next', lltype.Ptr(NODE3)), hints={'immutable': True})) - + + big_fields = [('big' + i, lltype.Signed) for i in string.ascii_lowercase] + BIG = lltype.GcForwardReference() + BIG.become(lltype.GcStruct('BIG', *big_fields, hints={'immutable': True})) + + for field, _ in big_fields: + locals()[field + 'descr'] = cpu.fielddescrof(BIG, field) + node = lltype.malloc(NODE) node.value = 5 node.next = node @@ -133,16 +140,25 @@ node2.parent.parent.typeptr = node_vtable2 node2addr = lltype.cast_opaque_ptr(llmemory.GCREF, node2) myptr = lltype.cast_opaque_ptr(llmemory.GCREF, node) - mynode2 = lltype.malloc(NODE) + mynodeb = lltype.malloc(NODE) myarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(lltype.GcArray(lltype.Signed), 13, zero=True)) - mynode2.parent.typeptr = node_vtable - myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode2) - mynode3 = lltype.malloc(NODE2) - mynode3.parent.parent.typeptr = node_vtable2 + mynodeb.parent.typeptr = node_vtable + myptrb = lltype.cast_opaque_ptr(llmemory.GCREF, mynodeb) + myptr2 = lltype.malloc(NODE2) + myptr2.parent.parent.typeptr = node_vtable2 + myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, myptr2) + nullptr = lltype.nullptr(llmemory.GCREF.TO) + + mynode3 = lltype.malloc(NODE3) + mynode3.parent.typeptr = node_vtable3 + mynode3.value = 7 + mynode3.next = mynode3 myptr3 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode3) # a NODE2 mynode4 = lltype.malloc(NODE3) mynode4.parent.typeptr = node_vtable3 myptr4 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode4) # a NODE3 + + nullptr = lltype.nullptr(llmemory.GCREF.TO) #nodebox2 = InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, node2)) nodesize = cpu.sizeof(NODE, node_vtable) @@ -203,7 +219,6 @@ arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) int32arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.INT)) int16arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.SHORT)) - floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) float32arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.SingleFloat)) arraydescr_tid = arraydescr.get_type_id() array = lltype.malloc(lltype.GcArray(lltype.Signed), 15, zero=True) @@ -212,6 +227,12 @@ array2ref = lltype.cast_opaque_ptr(llmemory.GCREF, array2) gcarraydescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF)) gcarraydescr_tid = gcarraydescr.get_type_id() + floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) + + arrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed, hints={"immutable": True})) + immutarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(arrayimmutdescr.A, 13, zero=True)) + gcarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF, hints={"immutable": True})) + floatarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Float, hints={"immutable": True})) # a GcStruct not inheriting from OBJECT tpl = lltype.malloc(S, zero=True) @@ -244,7 +265,7 @@ tsize = cpu.sizeof(T, None) cdescr = cpu.fielddescrof(T, 'c') ddescr = cpu.fielddescrof(T, 'd') - arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE))) + arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE3))) U = lltype.GcStruct('U', ('parent', OBJECT), diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -1103,8 +1103,8 @@ jump(p0) """ self.optimize_bridge(loops, bridge, loops[0], 'Loop0', [self.myptr]) - self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr3]) - self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr3]) + self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr2]) + self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr2]) self.optimize_bridge(loops, loops[0], loops[0], 'Loop0', [self.nullptr]) self.optimize_bridge(loops, loops[1], loops[1], 'Loop1', [self.nullptr]) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -188,12 +188,6 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - # note: the following line does not mean that the two operations are - # completely equivalent, because GETFIELD_GC_PURE is_always_pure(). - optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_I - def optimize_SETFIELD_GC(self, op): struct = op.getarg(0) opinfo = self.getptrinfo(struct) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -653,46 +653,37 @@ @arguments("box", "descr") def opimpl_getfield_gc_i(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_I, fielddescr, box) + return ConstInt(resbox) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_I, box, fielddescr, 'i') + + @arguments("box", "descr") + def opimpl_getfield_gc_f(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + resvalue = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_F, fielddescr, box) + return ConstFloat(resvalue) + return self._opimpl_getfield_gc_any_pureornot( + rop.GETFIELD_GC_F, box, fielddescr, 'f') + @arguments("box", "descr") def opimpl_getfield_gc_r(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + val = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_R, fielddescr, box) + return ConstPtr(val) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_R, box, fielddescr, 'r') - @arguments("box", "descr") - def opimpl_getfield_gc_f(self, box, fielddescr): - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_i_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_I, fielddescr, box) - return ConstInt(resbox) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_I, box, fielddescr, 'i') - - @arguments("box", "descr") - def opimpl_getfield_gc_f_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - resvalue = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_F, fielddescr, box) - return ConstFloat(resvalue) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_r_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - val = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_R, fielddescr, box) - return ConstPtr(val) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_R, box, fielddescr, 'r') + + opimpl_getfield_gc_i_pure = opimpl_getfield_gc_i + opimpl_getfield_gc_r_pure = opimpl_getfield_gc_r + opimpl_getfield_gc_f_pure = opimpl_getfield_gc_f @arguments("box", "box", "descr") def opimpl_getinteriorfield_gc_i(self, array, index, descr): @@ -733,7 +724,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info - opnum = OpHelpers.getfield_pure_for_descr(fielddescr) + opnum = OpHelpers.getfield_for_descr(fielddescr) if (ginfo is not None and fielddescr in ginfo.green_field_descrs and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't @@ -2104,17 +2095,7 @@ profiler = self.staticdata.profiler profiler.count_ops(opnum) resvalue = executor.execute(self.cpu, self, opnum, descr, *argboxes) - # - is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST - if not is_pure: - if (opnum == rop.GETFIELD_RAW_I or - opnum == rop.GETFIELD_RAW_R or - opnum == rop.GETFIELD_RAW_F or - opnum == rop.GETARRAYITEM_RAW_I or - opnum == rop.GETARRAYITEM_RAW_F): - is_pure = descr.is_always_pure() - # - if is_pure: + if OpHelpers.is_pure_with_descr(opnum, descr): return self._record_helper_pure(opnum, resvalue, descr, *argboxes) if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: return self._record_helper_ovf(opnum, resvalue, descr, *argboxes) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -231,7 +231,7 @@ class AbstractResOpOrInputArg(AbstractValue): _attrs_ = ('_forwarded',) - _forwarded = None # either another resop or OptInfo + _forwarded = None # either another resop or OptInfo def get_forwarded(self): return self._forwarded @@ -412,6 +412,8 @@ return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST def is_always_pure(self): + # Tells whether an operation is pure based solely on the opcode. + # Other operations (e.g. getfield ops) may be pure in some cases are well. return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): @@ -434,9 +436,7 @@ return self.opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, - rop.GETFIELD_GC_R, rop.GETFIELD_GC_PURE_I, - rop.GETFIELD_GC_PURE_R, rop.GETFIELD_GC_PURE_F) + return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R) def is_getarrayitem(self): return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, @@ -1602,14 +1602,6 @@ return rop.CALL_LOOPINVARIANT_N @staticmethod - def getfield_pure_for_descr(descr): - if descr.is_pointer_field(): - return rop.GETFIELD_GC_PURE_R - elif descr.is_float_field(): - return rop.GETFIELD_GC_PURE_F - return rop.GETFIELD_GC_PURE_I - - @staticmethod def getfield_for_descr(descr): if descr.is_pointer_field(): return rop.GETFIELD_GC_R @@ -1760,4 +1752,26 @@ opnum = rop.VEC_UNPACK_F return VecOperationNew(opnum, args, datatype, bytesize, signed, count) + @staticmethod + def is_pure_getfield(opnum, descr): + if (opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETFIELD_GC_R): + return descr is not None and descr.is_always_pure() + return False + @staticmethod + def is_pure_with_descr(opnum, descr): + is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST + if not is_pure: + if (opnum == rop.GETFIELD_RAW_I or + opnum == rop.GETFIELD_RAW_R or + opnum == rop.GETFIELD_RAW_F or + opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_R or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETARRAYITEM_RAW_I or + opnum == rop.GETARRAYITEM_RAW_F): + is_pure = descr.is_always_pure() + return is_pure + diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -320,7 +320,7 @@ assert res == 252 self.check_trace_count(1) self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, - 'getfield_gc_pure_i': 1, 'int_mul': 1, + 'getfield_gc_i': 1, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) def test_loops_are_transient(self): @@ -1405,7 +1405,7 @@ return tup[1] res = self.interp_operations(f, [3, 5]) assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure_i=0) + self.check_operations_history(setfield_gc=2, getfield_gc_i=0) def test_oosend_look_inside_only_one(self): class A: @@ -2522,7 +2522,7 @@ if counter > 10: return 7 assert self.meta_interp(build, []) == 7 - self.check_resops(getfield_gc_pure_r=2) + self.check_resops(getfield_gc_r=2) def test_args_becomming_equal(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -19,7 +19,7 @@ return y.x + 5 res = self.interp_operations(f, [23]) assert res == 28 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=1, int_add=1) + self.check_operations_history(getfield_gc_i=1, int_add=1) def test_fields_subclass(self): class X(object): @@ -41,8 +41,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def f(x, y): # this time, the field 'x' only shows up on subclass 'Y' @@ -50,8 +49,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def test_array(self): class X(object): @@ -66,8 +64,7 @@ return a.y[index] res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_index_error(self): class X(object): @@ -89,8 +86,7 @@ return a.get(index) res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_in_immutable(self): class X(object): @@ -106,8 +102,7 @@ return y.lst[index] + y.y + 5 res = self.interp_operations(f, [23, 0], listops=True) assert res == 23 + 24 + 5 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getfield_gc_pure_i=1, + self.check_operations_history(getfield_gc_r=1, getfield_gc_i=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1, int_add=3) diff --git a/rpython/jit/metainterp/test/test_quasiimmut.py b/rpython/jit/metainterp/test/test_quasiimmut.py --- a/rpython/jit/metainterp/test/test_quasiimmut.py +++ b/rpython/jit/metainterp/test/test_quasiimmut.py @@ -74,7 +74,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -101,7 +101,7 @@ res = self.meta_interp(f, [100, 7], enable_opts="") assert res == 700 # there should be no getfields, even though optimizations are turned off - self.check_resops(guard_not_invalidated=1, getfield_gc=0) + self.check_resops(guard_not_invalidated=1) def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) @@ -124,8 +124,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, - getfield_gc_pure_i=2) + self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, getfield_gc_i=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -156,7 +155,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -208,7 +207,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=0, getfield_gc=0) + self.check_resops(guard_not_invalidated=0) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -234,7 +233,7 @@ assert g(100, 7) == 700707 res = self.meta_interp(g, [100, 7]) assert res == 700707 - self.check_resops(guard_not_invalidated=4, getfield_gc=0) + self.check_resops(guard_not_invalidated=4) def test_invalidate_while_running(self): jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) @@ -348,7 +347,7 @@ res = self.meta_interp(f, [100, 30]) assert res == 6019 self.check_resops(guard_not_invalidated=8, guard_not_forced=0, - call_may_force=0, getfield_gc=0) + call_may_force=0) def test_list_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -374,8 +373,7 @@ getarrayitem_gc_pure_r=0, getarrayitem_gc_i=0, getarrayitem_gc_r=0, - getfield_gc_i=0, getfield_gc_pure_i=0, - getfield_gc_r=0, getfield_gC_pure_r=0) + getfield_gc_i=0, getfield_gc_r=0) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -405,9 +403,7 @@ assert res == 700 # operations must have been removed by the frontend self.check_resops(getarrayitem_gc_pure_i=0, guard_not_invalidated=1, - getarrayitem_gc_i=0, - getfield_gc=0, getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + getarrayitem_gc_i=0, getfield_gc_i=0, getfield_gc_r=0) def test_list_length_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -436,10 +436,10 @@ return p.x[0] + p.x[1] res = self.interp_operations(fn, [7]) assert res == 7 + 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == -7 - 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) def test_heap_caching_and_elidable_function(self): class A: @@ -517,12 +517,12 @@ return a1[0] + a2[0] + gn(a1, a2) res = self.interp_operations(fn, [7]) assert res == 2 * 7 + 2 * 6 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == 2 * -7 + 2 * -8 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) def test_heap_caching_multiple_arrays(self): class Gbl(object): diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1077,7 +1077,7 @@ res = self.meta_interp(f, [], repeat=7) assert res == f() - def test_getfield_gc_pure_nobug(self): + def test_pure_getfield_gc_nobug(self): mydriver = JitDriver(reds=['i', 's', 'a'], greens=[]) class A(object): From pypy.commits at gmail.com Thu Jan 21 16:05:58 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 21 Jan 2016 13:05:58 -0800 (PST) Subject: [pypy-commit] pypy default: Merge latest changes Message-ID: <56a14836.17941c0a.2b56c.063a@mx.google.com> Author: Spenser Bauman Branch: Changeset: r81898:ee7c705360c7 Date: 2016-01-21 16:05 -0500 http://bitbucket.org/pypy/pypy/changeset/ee7c705360c7/ Log: Merge latest changes diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -83,9 +83,9 @@ p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() - p41 = getfield_gc_pure_r(p38, descr=) + p41 = getfield_gc_r(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) - i42 = getfield_gc_pure_i(p38, descr=) + i42 = getfield_gc_i(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -435,21 +435,21 @@ guard_isnull(p5, descr=...) guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) - i22 = getfield_gc_pure_i(p12, descr=) + i22 = getfield_gc_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() - p32 = getfield_gc_pure_r(p29, descr=) + p32 = getfield_gc_r(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc_pure_i(p29, descr=) + i34 = getfield_gc_i(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) p37 = getfield_gc_r(ConstPtr(ptr36), descr=) guard_nonnull_class(p37, ConstClass(W_IntObject), descr=...) - i39 = getfield_gc_pure_i(p37, descr=) + i39 = getfield_gc_i(p37, descr=) i40 = int_add_ovf(i22, i39) guard_no_overflow(descr=...) --TICK-- @@ -466,7 +466,7 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure_i(p6, descr=) + i8 = getfield_gc_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) guard_not_invalidated? diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -84,7 +84,7 @@ guard_no_exception(descr=...) p20 = new_with_vtable(descr=...) call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) @@ -93,7 +93,7 @@ p28 = getfield_gc_r(p13, descr=) p29 = getinteriorfield_gc_r(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure_i(p29, descr=) + i31 = getfield_gc_i(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -101,13 +101,13 @@ loop = log._filter(log.loops[0]) assert loop.match(""" guard_class(p1, #, descr=...) - p4 = getfield_gc_pure_r(p1, descr=) + p4 = getfield_gc_r(p1, descr=) i5 = getfield_gc_i(p0, descr=) - p6 = getfield_gc_pure_r(p4, descr=) - p7 = getfield_gc_pure_r(p6, descr=) + p6 = getfield_gc_r(p4, descr=) + p7 = getfield_gc_r(p6, descr=) guard_class(p7, ConstClass(Float64), descr=...) - i9 = getfield_gc_pure_i(p4, descr=) - i10 = getfield_gc_pure_i(p6, descr=) + i9 = getfield_gc_i(p4, descr=) + i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) i14 = int_eq(i10, 60) i15 = int_or(i12, i14) @@ -117,28 +117,28 @@ i18 = float_ne(f16, 0.000000) guard_true(i18, descr=...) guard_nonnull_class(p2, ConstClass(W_BoolBox), descr=...) - i20 = getfield_gc_pure_i(p2, descr=) + i20 = getfield_gc_i(p2, descr=) i21 = int_is_true(i20) guard_false(i21, descr=...) i22 = getfield_gc_i(p0, descr=) - i23 = getfield_gc_pure_i(p1, descr=) + i23 = getfield_gc_i(p1, descr=) guard_true(i23, descr=...) i25 = int_add(i22, 1) - p26 = getfield_gc_pure_r(p0, descr=) - i27 = getfield_gc_pure_i(p1, descr=) + p26 = getfield_gc_r(p0, descr=) + i27 = getfield_gc_i(p1, descr=) i28 = int_is_true(i27) guard_true(i28, descr=...) - i29 = getfield_gc_pure_i(p6, descr=) + i29 = getfield_gc_i(p6, descr=) guard_value(i29, 8, descr=...) i30 = int_add(i5, 8) - i31 = getfield_gc_pure_i(p1, descr=) + i31 = getfield_gc_i(p1, descr=) i32 = int_ge(i25, i31) guard_false(i32, descr=...) p34 = new_with_vtable(descr=...) {{{ - setfield_gc(p34, p1, descr=) + setfield_gc(p34, p1, descr=) setfield_gc(p34, i25, descr=) - setfield_gc(p34, p26, descr=) + setfield_gc(p34, p26, descr=) setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -54,7 +54,7 @@ i19 = int_add(i11, 1) setfield_gc(p2, i19, descr=...) guard_nonnull_class(p18, ConstClass(W_IntObject), descr=...) - i20 = getfield_gc_pure_i(p18, descr=...) + i20 = getfield_gc_i(p18, descr=...) i21 = int_gt(i20, i14) guard_true(i21, descr=...) jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -113,7 +113,7 @@ i12 = int_is_true(i4) guard_true(i12, descr=...) guard_not_invalidated(descr=...) - i10p = getfield_gc_pure_i(p10, descr=...) + i10p = getfield_gc_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -82,7 +82,7 @@ strsetitem(p25, 0, i23) p93 = call_r(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) - i95 = getfield_gc_pure_i(p93, descr=) + i95 = getfield_gc_i(p93, descr=) i96 = int_gt(i95, #) guard_false(i96, descr=...) i94 = call_i(ConstClass(rbigint._toint_helper), p93, descr=) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -152,7 +152,7 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) self.index = heaptracker.get_fielddescr_index_in(S, fieldname) - self._is_pure = S._immutable_field(fieldname) + self._is_pure = S._immutable_field(fieldname) != False def is_always_pure(self): return self._is_pure @@ -608,9 +608,6 @@ p = support.cast_arg(lltype.Ptr(descr.S), p) return support.cast_result(descr.FIELD, getattr(p, descr.fieldname)) - bh_getfield_gc_pure_i = bh_getfield_gc - bh_getfield_gc_pure_r = bh_getfield_gc - bh_getfield_gc_pure_f = bh_getfield_gc bh_getfield_gc_i = bh_getfield_gc bh_getfield_gc_r = bh_getfield_gc bh_getfield_gc_f = bh_getfield_gc diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -180,7 +180,8 @@ return self.offset def repr_of_descr(self): - return '' % (self.flag, self.name, self.offset) + ispure = " pure" if self._is_pure else "" + return '' % (self.flag, self.name, self.offset, ispure) def get_parent_descr(self): return self.parent_descr @@ -200,7 +201,7 @@ flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) index_in_parent = heaptracker.get_fielddescr_index_in(STRUCT, fieldname) - is_pure = bool(STRUCT._immutable_field(fieldname)) + is_pure = STRUCT._immutable_field(fieldname) != False fielddescr = FieldDescr(name, offset, size, flag, index_in_parent, is_pure) cachedict = cache.setdefault(STRUCT, {}) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1477,9 +1477,6 @@ genop_getfield_gc_f = _genop_getfield genop_getfield_raw_i = _genop_getfield genop_getfield_raw_f = _genop_getfield - genop_getfield_gc_pure_i = _genop_getfield - genop_getfield_gc_pure_r = _genop_getfield - genop_getfield_gc_pure_f = _genop_getfield def _genop_gc_load(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc, sign_loc = arglocs diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -168,9 +168,6 @@ elif (opnum != rop.GETFIELD_GC_R and opnum != rop.GETFIELD_GC_I and opnum != rop.GETFIELD_GC_F and - opnum != rop.GETFIELD_GC_PURE_R and - opnum != rop.GETFIELD_GC_PURE_I and - opnum != rop.GETFIELD_GC_PURE_F and opnum != rop.PTR_EQ and opnum != rop.PTR_NE and opnum != rop.INSTANCE_PTR_EQ and diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -816,9 +816,6 @@ if 'getfield_gc' in check: assert check.pop('getfield_gc') == 0 check['getfield_gc_i'] = check['getfield_gc_r'] = check['getfield_gc_f'] = 0 - if 'getfield_gc_pure' in check: - assert check.pop('getfield_gc_pure') == 0 - check['getfield_gc_pure_i'] = check['getfield_gc_pure_r'] = check['getfield_gc_pure_f'] = 0 if 'getarrayitem_gc_pure' in check: assert check.pop('getarrayitem_gc_pure') == 0 check['getarrayitem_gc_pure_i'] = check['getarrayitem_gc_pure_r'] = check['getarrayitem_gc_pure_f'] = 0 diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -183,6 +183,8 @@ return res def invalidate(self, descr): + if descr.is_always_pure(): + return for opinfo in self.cached_infos: assert isinstance(opinfo, info.AbstractStructPtrInfo) opinfo._fields[descr.get_index()] = None @@ -515,9 +517,14 @@ return pendingfields def optimize_GETFIELD_GC_I(self, op): + descr = op.getdescr() + if descr.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None: + resbox = self.optimizer.constant_fold(op) + self.optimizer.make_constant(op, resbox) + return structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) + cf = self.field_cache(descr) + field = cf.getfield_from_cache(self, structinfo, descr) if field is not None: self.make_equal_to(op, field) return @@ -525,23 +532,10 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) + structinfo.setfield(descr, op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I - def optimize_SETFIELD_GC(self, op): self.setfield(op) #opnum = OpHelpers.getfield_pure_for_descr(op.getdescr()) @@ -631,12 +625,12 @@ def optimize_QUASIIMMUT_FIELD(self, op): # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC_PURE(s, descr='inst_x') + # x = GETFIELD_GC(s, descr='inst_x') # pure # If 's' is a constant (after optimizations) we rely on the rest of the - # optimizations to constant-fold the following getfield_gc_pure. + # optimizations to constant-fold the following pure getfield_gc. # in addition, we record the dependency here to make invalidation work # correctly. - # NB: emitting the GETFIELD_GC_PURE is only safe because the + # NB: emitting the pure GETFIELD_GC is only safe because the # QUASIIMMUT_FIELD is also emitted to make sure the dependency is # registered. structvalue = self.ensure_ptr_info_arg0(op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -10,6 +10,7 @@ from rpython.jit.metainterp.typesystem import llhelper from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.debug import debug_print +from rpython.jit.metainterp.optimize import SpeculativeError @@ -374,6 +375,7 @@ if (box.type == 'i' and box.get_forwarded() and box.get_forwarded().is_constant()): return ConstInt(box.get_forwarded().getint()) + return None #self.ensure_imported(value) def get_newoperations(self): @@ -736,12 +738,64 @@ self.emit_operation(op) def constant_fold(self, op): + self.protect_speculative_operation(op) argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] return execute_nonspec_const(self.cpu, None, op.getopnum(), argboxes, op.getdescr(), op.type) + def protect_speculative_operation(self, op): + """When constant-folding a pure operation that reads memory from + a gcref, make sure that the gcref is non-null and of a valid type. + Otherwise, raise SpeculativeError. This should only occur when + unrolling and optimizing the unrolled loop. Note that if + cpu.supports_guard_gc_type is false, we can't really do this + check at all, but then we don't unroll in that case. + """ + opnum = op.getopnum() + cpu = self.cpu + + if OpHelpers.is_pure_getfield(opnum, op.getdescr()): + fielddescr = op.getdescr() + ref = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_field(ref, fielddescr) + return + + elif (opnum == rop.GETARRAYITEM_GC_PURE_I or + opnum == rop.GETARRAYITEM_GC_PURE_R or + opnum == rop.GETARRAYITEM_GC_PURE_F or + opnum == rop.ARRAYLEN_GC): + arraydescr = op.getdescr() + array = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_array(array, arraydescr) + if opnum == rop.ARRAYLEN_GC: + return + arraylength = cpu.bh_arraylen_gc(array, arraydescr) + + elif (opnum == rop.STRGETITEM or + opnum == rop.STRLEN): + string = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_string(string) + if opnum == rop.STRLEN: + return + arraylength = cpu.bh_strlen(string) + + elif (opnum == rop.UNICODEGETITEM or + opnum == rop.UNICODELEN): + unicode = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_unicode(unicode) + if opnum == rop.UNICODELEN: + return + arraylength = cpu.bh_unicodelen(unicode) + + else: + return + + index = self.get_constant_box(op.getarg(1)).getint() + if not (0 <= index < arraylength): + raise SpeculativeError + def is_virtual(self, op): if op.type == 'r': opinfo = self.getptrinfo(op) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -94,7 +94,6 @@ break else: # all constant arguments: constant-fold away - self.protect_speculative_operation(op) resbox = self.optimizer.constant_fold(op) # note that INT_xxx_OVF is not done from here, and the # overflows in the INT_xxx operations are ignored @@ -119,59 +118,6 @@ if nextop: self.emit_operation(nextop) - def protect_speculative_operation(self, op): - """When constant-folding a pure operation that reads memory from - a gcref, make sure that the gcref is non-null and of a valid type. - Otherwise, raise SpeculativeError. This should only occur when - unrolling and optimizing the unrolled loop. Note that if - cpu.supports_guard_gc_type is false, we can't really do this - check at all, but then we don't unroll in that case. - """ - opnum = op.getopnum() - cpu = self.optimizer.cpu - - if (opnum == rop.GETFIELD_GC_PURE_I or - opnum == rop.GETFIELD_GC_PURE_R or - opnum == rop.GETFIELD_GC_PURE_F): - fielddescr = op.getdescr() - ref = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_field(ref, fielddescr) - return - - elif (opnum == rop.GETARRAYITEM_GC_PURE_I or - opnum == rop.GETARRAYITEM_GC_PURE_R or - opnum == rop.GETARRAYITEM_GC_PURE_F or - opnum == rop.ARRAYLEN_GC): - arraydescr = op.getdescr() - array = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_array(array, arraydescr) - if opnum == rop.ARRAYLEN_GC: - return - arraylength = cpu.bh_arraylen_gc(array, arraydescr) - - elif (opnum == rop.STRGETITEM or - opnum == rop.STRLEN): - string = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_string(string) - if opnum == rop.STRLEN: - return - arraylength = cpu.bh_strlen(string) - - elif (opnum == rop.UNICODEGETITEM or - opnum == rop.UNICODELEN): - unicode = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_unicode(unicode) - if opnum == rop.UNICODELEN: - return - arraylength = cpu.bh_unicodelen(unicode) - - else: - return - - index = self.get_constant_box(op.getarg(1)).getint() - if not (0 <= index < arraylength): - raise SpeculativeError - def getrecentops(self, opnum): if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: opnum = opnum - rop._OVF_FIRST diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -521,8 +521,8 @@ def test_getfield(self): graph = self.build_dependency(""" [p0, p1] # 0: 1,2,5 - p2 = getfield_gc_r(p0) # 1: 3,5 - p3 = getfield_gc_r(p0) # 2: 4 + p2 = getfield_gc_r(p0, descr=valuedescr) # 1: 3,5 + p3 = getfield_gc_r(p0, descr=valuedescr) # 2: 4 guard_nonnull(p2) [p2] # 3: 4,5 guard_nonnull(p3) [p3] # 4: 5 jump(p0,p2) # 5: @@ -532,10 +532,10 @@ def test_cyclic(self): graph = self.build_dependency(""" [p0, p1, p5, p6, p7, p9, p11, p12] # 0: 1,6 - p13 = getfield_gc_r(p9) # 1: 2,5 + p13 = getfield_gc_r(p9, descr=valuedescr) # 1: 2,5 guard_nonnull(p13) [] # 2: 4,5 - i14 = getfield_gc_i(p9) # 3: 5 - p15 = getfield_gc_r(p13) # 4: 5 + i14 = getfield_gc_i(p9, descr=valuedescr) # 3: 5 + p15 = getfield_gc_r(p13, descr=valuedescr) # 4: 5 guard_class(p15, 14073732) [p1, p0, p9, i14, p15, p13, p5, p6, p7] # 5: 6 jump(p0,p1,p5,p6,p7,p9,p11,p12) # 6: """) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -955,12 +955,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_getfield_gc_1(self): ops = """ [i] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + p1 = new_with_vtable(descr=nodesize3) + setfield_gc(p1, i, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) jump(i1) """ expected = """ @@ -969,17 +969,16 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ [i] - jump(5) - """ - self.node.value = 5 + jump(7) + """ self.optimize_loop(ops, expected) def test_getfield_gc_nonpure_2(self): @@ -1343,7 +1342,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -1355,7 +1354,7 @@ expected = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -1597,7 +1596,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -1608,7 +1607,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -5076,7 +5075,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i0 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i0 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i1 = call_pure_i(123, i0, descr=nonwritedescr) finish(i1) """ @@ -5462,15 +5461,15 @@ def test_getarrayitem_gc_pure_not_invalidated(self): ops = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) - i2 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i2 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(i2) jump(p0) """ expected = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) escape_n(i1) jump(p0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1409,12 +1409,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_pure_getfield_gc_1(self): ops = """ [i] p1 = new_with_vtable(descr=nodesize) setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + i1 = getfield_gc_i(p1, descr=valuedescr) jump(i1) """ expected = """ @@ -1423,10 +1423,10 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_pure_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ @@ -1436,20 +1436,20 @@ self.node.value = 5 self.optimize_loop(ops, expected) - def test_getfield_gc_pure_3(self): + def test_pure_getfield_gc_3(self): ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) - p3 = getfield_gc_pure_r(p1, descr=nextdescr) + p3 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p3) jump() """ expected = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) escape_n(p2) jump() @@ -2319,7 +2319,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -2332,7 +2332,7 @@ preamble = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2340,11 +2340,12 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - jump(p1, i1, i2, p3, i3) - """ - expected = """ - [p1, i1, i2, p3, i3] + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] # + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2352,8 +2353,7 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - ifoo = arraylen_gc(p3, descr=arraydescr) # killed by the backend - jump(p1, i1, i2, p3, i3) + jump(p1, i1, i2, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2669,7 +2669,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -2680,7 +2680,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -3302,8 +3302,8 @@ [p8, p11, i24] p26 = new(descr=ssize) setfield_gc(p26, i24, descr=adescr) - i34 = getfield_gc_pure_i(p11, descr=abisdescr) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=abisdescr) + i35 = getfield_gc_i(p26, descr=adescr) i36 = int_add_ovf(i34, i35) guard_no_overflow() [] jump(p8, p11, i35) @@ -3330,8 +3330,8 @@ setfield_gc(p26, i24, descr=adescr) i28 = int_add(i17, 1) setfield_gc(p8, i28, descr=valuedescr) - i34 = getfield_gc_pure_i(p11, descr=valuedescr3) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=valuedescr3) + i35 = getfield_gc_i(p26, descr=adescr) guard_nonnull(p12) [] i36 = int_add_ovf(i34, i35) guard_no_overflow() [] @@ -3522,14 +3522,14 @@ def test_residual_call_does_not_invalidate_immutable_caches(self): ops = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) - i3 = getfield_gc_pure_i(p1, descr=valuedescr3) + i3 = getfield_gc_i(p1, descr=valuedescr3) jump(p1) """ expected_preamble = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) jump(p1, i1) """ @@ -4878,11 +4878,11 @@ def test_add_sub_ovf_virtual_unroll(self): ops = """ [p15] - i886 = getfield_gc_pure_i(p15, descr=valuedescr) + i886 = getfield_gc_i(p15, descr=valuedescr) i888 = int_sub_ovf(i886, 1) guard_no_overflow() [] escape_n(i888) - i4360 = getfield_gc_pure_i(p15, descr=valuedescr) + i4360 = getfield_gc_i(p15, descr=valuedescr) i4362 = int_add_ovf(i4360, 1) guard_no_overflow() [] i4360p = int_sub_ovf(i4362, 1) @@ -4972,18 +4972,16 @@ def test_pure(self): ops = """ [p42] - p53 = getfield_gc_r(ConstPtr(myptr), descr=nextdescr) - p59 = getfield_gc_pure_r(p53, descr=valuedescr) + p53 = getfield_gc_r(ConstPtr(myptr3), descr=nextdescr3) + p59 = getfield_gc_r(p53, descr=valuedescr3) i61 = call_i(1, p59, descr=nonwritedescr) jump(p42) """ expected = """ - [p42, p59] - i61 = call_i(1, p59, descr=nonwritedescr) - jump(p42, p59) - - """ - self.node.value = 5 + [p42] + i61 = call_i(1, 7, descr=nonwritedescr) + jump(p42) + """ self.optimize_loop(ops, expected) def test_complains_getfieldpure_setfield(self): @@ -4992,7 +4990,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) setfield_gc(p1, p3, descr=nextdescr) jump(p3) """ @@ -5002,7 +5000,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5010,7 +5008,7 @@ expected = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5021,7 +5019,7 @@ ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) p3 = escape_r() setfield_gc(p3, p1, descr=nextdescr) jump() @@ -6167,14 +6165,14 @@ def test_bug_unroll_with_immutables(self): ops = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) p1 = new_with_vtable(descr=immut_descr) setfield_gc(p1, 1242, descr=immut_intval) jump(p1) """ preamble = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) jump() """ expected = """ @@ -7229,13 +7227,13 @@ [p0, p1, i0] quasiimmut_field(p0, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ expected = """ [p0, p1, i0] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ @@ -7246,7 +7244,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) escape_n(i1) jump() """ @@ -7298,11 +7296,11 @@ [i0a, i0b] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i2 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7325,11 +7323,11 @@ setfield_gc(p, 421, descr=quasifielddescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p, descr=quasifielddescr) + i1 = getfield_gc_i(p, descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(p, descr=quasifielddescr) + i2 = getfield_gc_i(p, descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7568,7 +7566,7 @@ def test_forced_virtual_pure_getfield(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=valuedescr) + p1 = getfield_gc_r(p0, descr=valuedescr3) jump(p1) """ self.optimize_loop(ops, ops) @@ -7578,7 +7576,7 @@ p1 = new_with_vtable(descr=nodesize3) setfield_gc(p1, p0, descr=valuedescr3) escape_n(p1) - p2 = getfield_gc_pure_r(p1, descr=valuedescr3) + p2 = getfield_gc_r(p1, descr=valuedescr3) escape_n(p2) jump(p0) """ @@ -7852,14 +7850,14 @@ def test_loopinvariant_getarrayitem_gc_pure(self): ops = """ [p9, i1] - i843 = getarrayitem_gc_pure_i(p9, i1, descr=arraydescr) + i843 = getarrayitem_gc_pure_i(p9, i1, descr=arrayimmutdescr) call_n(i843, descr=nonwritedescr) jump(p9, i1) """ expected = """ [p9, i1, i843] call_n(i843, descr=nonwritedescr) - ifoo = arraylen_gc(p9, descr=arraydescr) + ifoo = arraylen_gc(p9, descr=arrayimmutdescr) jump(p9, i1, i843) """ self.optimize_loop(ops, expected) @@ -7868,7 +7866,7 @@ ops = """ [p0] p1 = getfield_gc_r(p0, descr=nextdescr) - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) call_n(p2, descr=nonwritedescr) jump(p0) """ @@ -7883,14 +7881,14 @@ i1 = arraylen_gc(p1, descr=gcarraydescr) i2 = int_ge(i1, 8) guard_true(i2) [] - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) - jump(p2, p1) - """ - expected = """ - [p0, p2, p1] + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) + jump(p1, p2) + """ + expected = """ + [p0, p1, p2] call_n(p2, descr=nonwritedescr) i3 = arraylen_gc(p1, descr=gcarraydescr) # Should be killed by backend - jump(p0, p2, p1) + jump(p0, p1, p2) """ self.optimize_loop(ops, expected, expected_short=short) @@ -8065,7 +8063,7 @@ def test_dont_mixup_equal_boxes(self): ops = """ [p8] - i9 = getfield_gc_pure_i(p8, descr=valuedescr) + i9 = getfield_gc_i(p8, descr=valuedescr3) i10 = int_gt(i9, 0) guard_true(i10) [] i29 = int_lshift(i9, 1) @@ -8160,9 +8158,9 @@ py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") ops = """ [p5, p8] - i9 = getfield_gc_pure_i(p5, descr=valuedescr) + i9 = getfield_gc_i(p5, descr=valuedescr) call_n(i9, descr=nonwritedescr) - i11 = getfield_gc_pure_i(p8, descr=valuedescr) + i11 = getfield_gc_i(p8, descr=valuedescr) i13 = int_add_ovf(i11, 1) guard_no_overflow() [] p22 = new_with_vtable(descr=nodesize) @@ -8201,14 +8199,14 @@ ops = """ [p0] p10 = getfield_gc_r(ConstPtr(myptr), descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(ConstPtr(myptr), ConstPtr(myptr2), descr=otherdescr) + setfield_gc(ConstPtr(myptr), ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8232,14 +8230,14 @@ ops = """ [p0] p10 = getfield_gc_r(p0, descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(p0, ConstPtr(myptr2), descr=otherdescr) + setfield_gc(p0, ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8624,17 +8622,17 @@ [p10] p52 = getfield_gc_r(p10, descr=nextdescr) # inst_storage p54 = getarrayitem_gc_r(p52, 0, descr=arraydescr) - p69 = getfield_gc_pure_r(p54, descr=otherdescr) # inst_w_function + p69 = getfield_gc_r(p54, descr=otherdescr) # inst_w_function quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] p106 = new_with_vtable(descr=nodesize) p108 = new_array(3, descr=arraydescr) p110 = new_with_vtable(descr=nodesize) - setfield_gc(p110, ConstPtr(myptr2), descr=otherdescr) # inst_w_function + setfield_gc(p110, ConstPtr(myptrb), descr=otherdescr) # inst_w_function setarrayitem_gc(p108, 0, p110, descr=arraydescr) setfield_gc(p106, p108, descr=nextdescr) # inst_storage jump(p106) @@ -8650,7 +8648,7 @@ [p69] quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] jump(ConstPtr(myptr)) """ @@ -8852,13 +8850,13 @@ def test_virtual_back_and_forth(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=bdescr) + p1 = getfield_gc_r(p0, descr=nextdescr3) ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) - ix = getarrayitem_gc_pure_i(p2, 0, descr=arraydescr) + ix = getarrayitem_gc_pure_i(p2, 0, descr=arrayimmutdescr) pfoo = getfield_gc_r(ptemp, descr=nextdescr) - guard_value(pfoo, ConstPtr(myarray)) [] + guard_value(pfoo, ConstPtr(immutarray)) [] ifoo = int_add(ix, 13) escape_n(ix) jump(p0) @@ -8888,13 +8886,13 @@ def test_constant_float_pure(self): ops = """ [p0] - f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarraydescr) + f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarrayimmutdescr) guard_value(f0, 1.03) [] jump(p0) """ expected = """ [p0] - ifoo = arraylen_gc(p0, descr=floatarraydescr) + ifoo = arraylen_gc(p0, descr=floatarrayimmutdescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -9102,7 +9100,7 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getfield_gc_pure_i(p0, descr=valuedescr) + getfield_gc_i(p0, descr=valuedescr3) i3 = int_sub(i1, 1) jump(NULL, i3) """ @@ -9113,9 +9111,9 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getfield_gc_pure_i(p0, descr=valuedescr) + getfield_gc_i(p0, descr=valuedescr3) i3 = int_sub(i1, 1) - jump(ConstPtr(myptr4), i3) + jump(ConstPtr(myptr2), i3) """ py.test.raises(InvalidLoop, self.optimize_loop, ops, ops) @@ -9265,9 +9263,126 @@ guard_value(i1, 5) [] jump() """ - a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE)), 5, zero=True) + a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE3)), 5, zero=True) self.optimize_loop(ops, expected, jump_values=[a]) + def test_large_number_of_immutable_references(self): + ops = """ + [p0] + i0 = getfield_gc_i(p0, descr=bigadescr) + i1 = getfield_gc_i(p0, descr=bigbdescr) + i2 = getfield_gc_i(p0, descr=bigcdescr) + i3 = getfield_gc_i(p0, descr=bigddescr) + i4 = getfield_gc_i(p0, descr=bigedescr) + i5 = getfield_gc_i(p0, descr=bigfdescr) + i6 = getfield_gc_i(p0, descr=biggdescr) + i7 = getfield_gc_i(p0, descr=bighdescr) + i8 = getfield_gc_i(p0, descr=bigidescr) + i9 = getfield_gc_i(p0, descr=bigjdescr) + i10 = getfield_gc_i(p0, descr=bigkdescr) + i11 = getfield_gc_i(p0, descr=bigldescr) + i12 = getfield_gc_i(p0, descr=bigmdescr) + i13 = getfield_gc_i(p0, descr=bigndescr) + i14 = getfield_gc_i(p0, descr=bigodescr) + i15 = getfield_gc_i(p0, descr=bigpdescr) + i16 = getfield_gc_i(p0, descr=bigqdescr) + i17 = getfield_gc_i(p0, descr=bigrdescr) + i18 = getfield_gc_i(p0, descr=bigsdescr) + i19 = getfield_gc_i(p0, descr=bigtdescr) + i20 = getfield_gc_i(p0, descr=bigudescr) + i21 = getfield_gc_i(p0, descr=bigvdescr) + i22 = getfield_gc_i(p0, descr=bigwdescr) + i23 = getfield_gc_i(p0, descr=bigxdescr) + i24 = getfield_gc_i(p0, descr=bigydescr) + i25 = getfield_gc_i(p0, descr=bigzdescr) + i27 = getfield_gc_i(p0, descr=bigbdescr) + i28 = getfield_gc_i(p0, descr=bigcdescr) + i29 = getfield_gc_i(p0, descr=bigddescr) + i30 = getfield_gc_i(p0, descr=bigedescr) + i31 = getfield_gc_i(p0, descr=bigfdescr) + i32 = getfield_gc_i(p0, descr=biggdescr) + i33 = getfield_gc_i(p0, descr=bighdescr) + i34 = getfield_gc_i(p0, descr=bigidescr) + i35 = getfield_gc_i(p0, descr=bigjdescr) + i36 = getfield_gc_i(p0, descr=bigkdescr) + i37 = getfield_gc_i(p0, descr=bigldescr) + i38 = getfield_gc_i(p0, descr=bigmdescr) + i39 = getfield_gc_i(p0, descr=bigndescr) + i40 = getfield_gc_i(p0, descr=bigodescr) + i41 = getfield_gc_i(p0, descr=bigpdescr) + i42 = getfield_gc_i(p0, descr=bigqdescr) + i43 = getfield_gc_i(p0, descr=bigrdescr) + i44 = getfield_gc_i(p0, descr=bigsdescr) + i45 = getfield_gc_i(p0, descr=bigtdescr) + i46 = getfield_gc_i(p0, descr=bigudescr) + i47 = getfield_gc_i(p0, descr=bigvdescr) + i48 = getfield_gc_i(p0, descr=bigwdescr) + i49 = getfield_gc_i(p0, descr=bigxdescr) + i50 = getfield_gc_i(p0, descr=bigydescr) + i51 = getfield_gc_i(p0, descr=bigzdescr) + i26 = getfield_gc_i(p0, descr=bigadescr) + i99 = int_add(i26, i51) + escape_i(i27) + escape_i(i28) + escape_i(i29) + escape_i(i30) + escape_i(i31) + escape_i(i32) + escape_i(i33) + escape_i(i34) + escape_i(i35) + escape_i(i36) + escape_i(i37) + escape_i(i38) + escape_i(i39) + escape_i(i40) + escape_i(i41) + escape_i(i42) + escape_i(i43) + escape_i(i44) + escape_i(i45) + escape_i(i46) + escape_i(i47) + escape_i(i48) + escape_i(i49) + escape_i(i50) + escape_i(i51) + escape_i(i26) + escape_i(i99) + jump(p0) + """ + expected = """ + [p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99] + escape_i(i1) + escape_i(i2) + escape_i(i3) + escape_i(i4) + escape_i(i5) + escape_i(i6) + escape_i(i7) + escape_i(i8) + escape_i(i9) + escape_i(i10) + escape_i(i11) + escape_i(i12) + escape_i(i13) + escape_i(i14) + escape_i(i15) + escape_i(i16) + escape_i(i17) + escape_i(i18) + escape_i(i19) + escape_i(i20) + escape_i(i21) + escape_i(i22) + escape_i(i23) + escape_i(i24) + escape_i(i25) + escape_i(i0) + escape_i(i99) + jump(p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99) + """ + self.optimize_loop(ops, expected) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -220,16 +220,16 @@ def test_double_getfield_plus_pure(self): loop = """ [p0] - pc = getfield_gc_pure_r(p0, descr=nextdescr) + pc = getfield_gc_r(p0, descr=nextdescr3) escape_n(p0) # that should flush the caches - p1 = getfield_gc_r(pc, descr=nextdescr) - i0 = getfield_gc_i(p1, descr=valuedescr) + p1 = getfield_gc_r(pc, descr=nextdescr3) + i0 = getfield_gc_i(p1, descr=valuedescr3) jump(p0) """ es, loop, preamble = self.optimize(loop) assert len(es.short_boxes) == 4 # both getfields are available as - # well as getfield_gc_pure + # well as getfield_gc def test_p123_anti_nested(self): loop = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,4 +1,4 @@ -import py, random +import py, random, string from rpython.rlib.debug import debug_print from rpython.rtyper.lltypesystem import lltype, llmemory, rffi @@ -122,7 +122,14 @@ ('value', lltype.Signed), ('next', lltype.Ptr(NODE3)), hints={'immutable': True})) - + + big_fields = [('big' + i, lltype.Signed) for i in string.ascii_lowercase] + BIG = lltype.GcForwardReference() + BIG.become(lltype.GcStruct('BIG', *big_fields, hints={'immutable': True})) + + for field, _ in big_fields: + locals()[field + 'descr'] = cpu.fielddescrof(BIG, field) + node = lltype.malloc(NODE) node.value = 5 node.next = node @@ -133,16 +140,25 @@ node2.parent.parent.typeptr = node_vtable2 node2addr = lltype.cast_opaque_ptr(llmemory.GCREF, node2) myptr = lltype.cast_opaque_ptr(llmemory.GCREF, node) - mynode2 = lltype.malloc(NODE) + mynodeb = lltype.malloc(NODE) myarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(lltype.GcArray(lltype.Signed), 13, zero=True)) - mynode2.parent.typeptr = node_vtable - myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode2) - mynode3 = lltype.malloc(NODE2) - mynode3.parent.parent.typeptr = node_vtable2 + mynodeb.parent.typeptr = node_vtable + myptrb = lltype.cast_opaque_ptr(llmemory.GCREF, mynodeb) + myptr2 = lltype.malloc(NODE2) + myptr2.parent.parent.typeptr = node_vtable2 + myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, myptr2) + nullptr = lltype.nullptr(llmemory.GCREF.TO) + + mynode3 = lltype.malloc(NODE3) + mynode3.parent.typeptr = node_vtable3 + mynode3.value = 7 + mynode3.next = mynode3 myptr3 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode3) # a NODE2 mynode4 = lltype.malloc(NODE3) mynode4.parent.typeptr = node_vtable3 myptr4 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode4) # a NODE3 + + nullptr = lltype.nullptr(llmemory.GCREF.TO) #nodebox2 = InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, node2)) nodesize = cpu.sizeof(NODE, node_vtable) @@ -203,7 +219,6 @@ arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) int32arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.INT)) int16arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.SHORT)) - floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) float32arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.SingleFloat)) arraydescr_tid = arraydescr.get_type_id() array = lltype.malloc(lltype.GcArray(lltype.Signed), 15, zero=True) @@ -212,6 +227,12 @@ array2ref = lltype.cast_opaque_ptr(llmemory.GCREF, array2) gcarraydescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF)) gcarraydescr_tid = gcarraydescr.get_type_id() + floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) + + arrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed, hints={"immutable": True})) + immutarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(arrayimmutdescr.A, 13, zero=True)) + gcarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF, hints={"immutable": True})) + floatarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Float, hints={"immutable": True})) # a GcStruct not inheriting from OBJECT tpl = lltype.malloc(S, zero=True) @@ -244,7 +265,7 @@ tsize = cpu.sizeof(T, None) cdescr = cpu.fielddescrof(T, 'c') ddescr = cpu.fielddescrof(T, 'd') - arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE))) + arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE3))) U = lltype.GcStruct('U', ('parent', OBJECT), diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -1103,8 +1103,8 @@ jump(p0) """ self.optimize_bridge(loops, bridge, loops[0], 'Loop0', [self.myptr]) - self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr3]) - self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr3]) + self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr2]) + self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr2]) self.optimize_bridge(loops, loops[0], loops[0], 'Loop0', [self.nullptr]) self.optimize_bridge(loops, loops[1], loops[1], 'Loop1', [self.nullptr]) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -188,12 +188,6 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - # note: the following line does not mean that the two operations are - # completely equivalent, because GETFIELD_GC_PURE is_always_pure(). - optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_I - def optimize_SETFIELD_GC(self, op): struct = op.getarg(0) opinfo = self.getptrinfo(struct) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -653,46 +653,37 @@ @arguments("box", "descr") def opimpl_getfield_gc_i(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_I, fielddescr, box) + return ConstInt(resbox) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_I, box, fielddescr, 'i') + + @arguments("box", "descr") + def opimpl_getfield_gc_f(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + resvalue = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_F, fielddescr, box) + return ConstFloat(resvalue) + return self._opimpl_getfield_gc_any_pureornot( + rop.GETFIELD_GC_F, box, fielddescr, 'f') + @arguments("box", "descr") def opimpl_getfield_gc_r(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + val = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_R, fielddescr, box) + return ConstPtr(val) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_R, box, fielddescr, 'r') - @arguments("box", "descr") - def opimpl_getfield_gc_f(self, box, fielddescr): - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_i_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_I, fielddescr, box) - return ConstInt(resbox) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_I, box, fielddescr, 'i') - - @arguments("box", "descr") - def opimpl_getfield_gc_f_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - resvalue = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_F, fielddescr, box) - return ConstFloat(resvalue) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_r_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - val = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_R, fielddescr, box) - return ConstPtr(val) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_R, box, fielddescr, 'r') + + opimpl_getfield_gc_i_pure = opimpl_getfield_gc_i + opimpl_getfield_gc_r_pure = opimpl_getfield_gc_r + opimpl_getfield_gc_f_pure = opimpl_getfield_gc_f @arguments("box", "box", "descr") def opimpl_getinteriorfield_gc_i(self, array, index, descr): @@ -733,7 +724,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info - opnum = OpHelpers.getfield_pure_for_descr(fielddescr) + opnum = OpHelpers.getfield_for_descr(fielddescr) if (ginfo is not None and fielddescr in ginfo.green_field_descrs and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't @@ -2104,17 +2095,7 @@ profiler = self.staticdata.profiler profiler.count_ops(opnum) resvalue = executor.execute(self.cpu, self, opnum, descr, *argboxes) - # - is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST - if not is_pure: - if (opnum == rop.GETFIELD_RAW_I or - opnum == rop.GETFIELD_RAW_R or - opnum == rop.GETFIELD_RAW_F or - opnum == rop.GETARRAYITEM_RAW_I or - opnum == rop.GETARRAYITEM_RAW_F): - is_pure = descr.is_always_pure() - # - if is_pure: + if OpHelpers.is_pure_with_descr(opnum, descr): return self._record_helper_pure(opnum, resvalue, descr, *argboxes) if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: return self._record_helper_ovf(opnum, resvalue, descr, *argboxes) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -231,7 +231,7 @@ class AbstractResOpOrInputArg(AbstractValue): _attrs_ = ('_forwarded',) - _forwarded = None # either another resop or OptInfo + _forwarded = None # either another resop or OptInfo def get_forwarded(self): return self._forwarded @@ -412,6 +412,8 @@ return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST def is_always_pure(self): + # Tells whether an operation is pure based solely on the opcode. + # Other operations (e.g. getfield ops) may be pure in some cases are well. return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): @@ -434,9 +436,7 @@ return self.opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, - rop.GETFIELD_GC_R, rop.GETFIELD_GC_PURE_I, - rop.GETFIELD_GC_PURE_R, rop.GETFIELD_GC_PURE_F) + return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R) def is_getarrayitem(self): return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, @@ -1602,14 +1602,6 @@ return rop.CALL_LOOPINVARIANT_N @staticmethod - def getfield_pure_for_descr(descr): - if descr.is_pointer_field(): - return rop.GETFIELD_GC_PURE_R - elif descr.is_float_field(): - return rop.GETFIELD_GC_PURE_F - return rop.GETFIELD_GC_PURE_I - - @staticmethod def getfield_for_descr(descr): if descr.is_pointer_field(): return rop.GETFIELD_GC_R @@ -1760,4 +1752,26 @@ opnum = rop.VEC_UNPACK_F return VecOperationNew(opnum, args, datatype, bytesize, signed, count) + @staticmethod + def is_pure_getfield(opnum, descr): + if (opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETFIELD_GC_R): + return descr is not None and descr.is_always_pure() + return False + @staticmethod + def is_pure_with_descr(opnum, descr): + is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST + if not is_pure: + if (opnum == rop.GETFIELD_RAW_I or + opnum == rop.GETFIELD_RAW_R or + opnum == rop.GETFIELD_RAW_F or + opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_R or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETARRAYITEM_RAW_I or + opnum == rop.GETARRAYITEM_RAW_F): + is_pure = descr.is_always_pure() + return is_pure + diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -320,7 +320,7 @@ assert res == 252 self.check_trace_count(1) self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, - 'getfield_gc_pure_i': 1, 'int_mul': 1, + 'getfield_gc_i': 1, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) def test_loops_are_transient(self): @@ -1405,7 +1405,7 @@ return tup[1] res = self.interp_operations(f, [3, 5]) assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure_i=0) + self.check_operations_history(setfield_gc=2, getfield_gc_i=0) def test_oosend_look_inside_only_one(self): class A: @@ -2522,7 +2522,7 @@ if counter > 10: return 7 assert self.meta_interp(build, []) == 7 - self.check_resops(getfield_gc_pure_r=2) + self.check_resops(getfield_gc_r=2) def test_args_becomming_equal(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -19,7 +19,7 @@ return y.x + 5 res = self.interp_operations(f, [23]) assert res == 28 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=1, int_add=1) + self.check_operations_history(getfield_gc_i=1, int_add=1) def test_fields_subclass(self): class X(object): @@ -41,8 +41,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def f(x, y): # this time, the field 'x' only shows up on subclass 'Y' @@ -50,8 +49,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def test_array(self): class X(object): @@ -66,8 +64,7 @@ return a.y[index] res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_index_error(self): class X(object): @@ -89,8 +86,7 @@ return a.get(index) res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_in_immutable(self): class X(object): @@ -106,8 +102,7 @@ return y.lst[index] + y.y + 5 res = self.interp_operations(f, [23, 0], listops=True) assert res == 23 + 24 + 5 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getfield_gc_pure_i=1, + self.check_operations_history(getfield_gc_r=1, getfield_gc_i=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1, int_add=3) diff --git a/rpython/jit/metainterp/test/test_quasiimmut.py b/rpython/jit/metainterp/test/test_quasiimmut.py --- a/rpython/jit/metainterp/test/test_quasiimmut.py +++ b/rpython/jit/metainterp/test/test_quasiimmut.py @@ -74,7 +74,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -101,7 +101,7 @@ res = self.meta_interp(f, [100, 7], enable_opts="") assert res == 700 # there should be no getfields, even though optimizations are turned off - self.check_resops(guard_not_invalidated=1, getfield_gc=0) + self.check_resops(guard_not_invalidated=1) def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) @@ -124,8 +124,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, - getfield_gc_pure_i=2) + self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, getfield_gc_i=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -156,7 +155,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -208,7 +207,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=0, getfield_gc=0) + self.check_resops(guard_not_invalidated=0) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -234,7 +233,7 @@ assert g(100, 7) == 700707 res = self.meta_interp(g, [100, 7]) assert res == 700707 - self.check_resops(guard_not_invalidated=4, getfield_gc=0) + self.check_resops(guard_not_invalidated=4) def test_invalidate_while_running(self): jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) @@ -348,7 +347,7 @@ res = self.meta_interp(f, [100, 30]) assert res == 6019 self.check_resops(guard_not_invalidated=8, guard_not_forced=0, - call_may_force=0, getfield_gc=0) + call_may_force=0) def test_list_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -374,8 +373,7 @@ getarrayitem_gc_pure_r=0, getarrayitem_gc_i=0, getarrayitem_gc_r=0, - getfield_gc_i=0, getfield_gc_pure_i=0, - getfield_gc_r=0, getfield_gC_pure_r=0) + getfield_gc_i=0, getfield_gc_r=0) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -405,9 +403,7 @@ assert res == 700 # operations must have been removed by the frontend self.check_resops(getarrayitem_gc_pure_i=0, guard_not_invalidated=1, - getarrayitem_gc_i=0, - getfield_gc=0, getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + getarrayitem_gc_i=0, getfield_gc_i=0, getfield_gc_r=0) def test_list_length_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -436,10 +436,10 @@ return p.x[0] + p.x[1] res = self.interp_operations(fn, [7]) assert res == 7 + 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == -7 - 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) def test_heap_caching_and_elidable_function(self): class A: @@ -517,12 +517,12 @@ return a1[0] + a2[0] + gn(a1, a2) res = self.interp_operations(fn, [7]) assert res == 2 * 7 + 2 * 6 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == 2 * -7 + 2 * -8 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) def test_heap_caching_multiple_arrays(self): class Gbl(object): diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1077,7 +1077,7 @@ res = self.meta_interp(f, [], repeat=7) assert res == f() - def test_getfield_gc_pure_nobug(self): + def test_pure_getfield_gc_nobug(self): mydriver = JitDriver(reds=['i', 's', 'a'], greens=[]) class A(object): From pypy.commits at gmail.com Thu Jan 21 16:22:48 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 21 Jan 2016 13:22:48 -0800 (PST) Subject: [pypy-commit] pypy default: Mention remove-getfield-pure branch in whatsnew-head Message-ID: <56a14c28.05bd1c0a.82596.0b09@mx.google.com> Author: Spenser Bauman Branch: Changeset: r81899:35c27789e353 Date: 2016-01-21 16:22 -0500 http://bitbucket.org/pypy/pypy/changeset/35c27789e353/ Log: Mention remove-getfield-pure branch in whatsnew-head diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -123,3 +123,13 @@ .. branch: fix-cpython-ssl-tests-2.7 Fix SSL tests by importing cpython's patch + +.. branch: remove-getfield-pure + +Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant +optimizations instead consult the field descriptor to determine the purity of +the operation. Additionally, pure ``getfield`` operations are now handled +entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than +`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen +for traces containing a large number of pure getfield operations. + From pypy.commits at gmail.com Fri Jan 22 02:37:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 21 Jan 2016 23:37:00 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: unicode endian test issue in pypy module Message-ID: <56a1dc1c.c5321c0a.8b828.ffff82c0@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81900:d2af83605cf2 Date: 2016-01-22 08:36 +0100 http://bitbucket.org/pypy/pypy/changeset/d2af83605cf2/ Log: unicode endian test issue in pypy module diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -389,6 +389,7 @@ def test_writelines(self): import array + import sys fn = self.temptestfile with file(fn, 'w') as f: f.writelines(['abc']) @@ -406,7 +407,10 @@ exc = raises(TypeError, f.writelines, [memoryview('jkl')]) assert str(exc.value) == "writelines() argument must be a sequence of strings" out = open(fn, 'rb').readlines()[0] - assert out[0:5] == 'abcd\x00' + if sys.byteorder == 'big': + assert out[0:7] == 'abc\x00\x00\x00d' + else: + assert out[0:5] == 'abcd\x00' assert out[-3:] == 'ghi' with file(fn, 'wb') as f: From pypy.commits at gmail.com Fri Jan 22 02:44:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 21 Jan 2016 23:44:31 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed debug print statement, return value of closure changed to long. a closure return value must fill a full ffi_arg Message-ID: <56a1dddf.2851c20a.c3793.ffffa4e3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81901:4cafa238dca9 Date: 2016-01-22 08:43 +0100 http://bitbucket.org/pypy/pypy/changeset/4cafa238dca9/ Log: removed debug print statement, return value of closure changed to long. a closure return value must fill a full ffi_arg diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -704,7 +704,6 @@ def compare(a, b): a1 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(a, 1)[0], 1) a2 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(b, 1)[0], 1) - print "comparing", a1[0], "with", a2[0] if a1[0] not in [1,2,3,4] or a2[0] not in [1,2,3,4]: bogus_args.append((a1[0], a2[0])) if a1[0] > a2[0]: @@ -715,7 +714,7 @@ a2[0] = len(ll_to_sort) a3 = _rawffi.Array('l')(1) a3[0] = struct.calcsize('i') - cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'i') + cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'l') a4 = cb.byptr() qsort(a1, a2, a3, a4) res = [ll_to_sort[i] for i in range(len(ll_to_sort))] From pypy.commits at gmail.com Fri Jan 22 03:18:22 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Jan 2016 00:18:22 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: test and fix for write_necessary logic when mixing ints and objects Message-ID: <56a1e5ce.11301c0a.52de.ffff913c@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81902:c1866643eecd Date: 2016-01-22 09:13 +0100 http://bitbucket.org/pypy/pypy/changeset/c1866643eecd/ Log: test and fix for write_necessary logic when mixing ints and objects diff --git a/rpython/rlib/heapprof.py b/rpython/rlib/heapprof.py --- a/rpython/rlib/heapprof.py +++ b/rpython/rlib/heapprof.py @@ -108,8 +108,9 @@ # call write_necessary if there is already a value there assert not status == SEEN_NOTHING if status == SEEN_CONSTANT_INT: - return (self.is_int(w_value) and - self.read_constant_int() != self.get_int_val(w_value)) + if not self.is_int(w_value): + return True + return self.read_constant_int() != self.get_int_val(w_value) elif status == SEEN_CONSTANT_OBJ: prev_obj = self.try_read_constant_obj() return prev_obj is not w_value diff --git a/rpython/rlib/test/test_heapprof.py b/rpython/rlib/test/test_heapprof.py --- a/rpython/rlib/test/test_heapprof.py +++ b/rpython/rlib/test/test_heapprof.py @@ -143,6 +143,13 @@ res = v.write_necessary(ValueInt(1)) assert res + v = HeapProf() + assert v._hprof_status == SEEN_NOTHING + v.see_write(ValueInt(1)) + res = v.write_necessary(Value()) + assert res + + def test_write_not_necessary_obj(): v = HeapProf() assert v._hprof_status == SEEN_NOTHING @@ -159,3 +166,4 @@ v.see_write(Value()) res = v.write_necessary(Value()) assert res + From pypy.commits at gmail.com Fri Jan 22 03:18:24 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Jan 2016 00:18:24 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: test that found the problem of the previous commit Message-ID: <56a1e5d0.a5c9c20a.809ac.ffffb4ca@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81903:7e6dd66f2318 Date: 2016-01-22 09:17 +0100 http://bitbucket.org/pypy/pypy/changeset/7e6dd66f2318/ Log: test that found the problem of the previous commit diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -931,6 +931,21 @@ d = x.__dict__ assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1] + def test_bug_two_attributes(self): + class A(object): + def __setitem__(self, key, value): + self.setkey = key + self.setvalue = value + a1 = A() + a2 = A() + a1[a2] = 42 + assert a1.setkey is a2 + assert a1.setvalue == 42 + # + a1[42] = a2 + assert a1.setkey == 42 + assert a1.setvalue is a2 + class AppTestWithMapDictAndCounters(object): spaceconfig = {"objspace.std.withmapdict": True, From pypy.commits at gmail.com Fri Jan 22 03:21:24 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 22 Jan 2016 00:21:24 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: big endian test issue cpyext Message-ID: <56a1e684.6217c20a.dcde6.ffffb5ef@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81904:43c90f2f520e Date: 2016-01-22 09:20 +0100 http://bitbucket.org/pypy/pypy/changeset/43c90f2f520e/ Log: big endian test issue cpyext diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -51,13 +51,20 @@ assert arr.tolist() == [1, 23, 4] def test_buffer(self): + import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) buf = buffer(arr) exc = raises(TypeError, "buf[1] = '1'") assert str(exc.value) == "buffer is read-only" # XXX big-endian - assert str(buf) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + if sys.byteorder == 'big': + assert str(buf) == ('\0\0\0\x01' + '\0\0\0\x02' + '\0\0\0\x03' + '\0\0\0\x04') + else: + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') From pypy.commits at gmail.com Fri Jan 22 03:35:05 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 22 Jan 2016 00:35:05 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: big endian issue while testing cpyext (PyUnicode_DecodeUTF16) Message-ID: <56a1e9b9.034cc20a.9c04f.ffffb849@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81905:b1b31c094879 Date: 2016-01-22 09:34 +0100 http://bitbucket.org/pypy/pypy/changeset/b1b31c094879/ Log: big endian issue while testing cpyext (PyUnicode_DecodeUTF16) diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -57,7 +57,6 @@ buf = buffer(arr) exc = raises(TypeError, "buf[1] = '1'") assert str(exc.value) == "buffer is read-only" - # XXX big-endian if sys.byteorder == 'big': assert str(buf) == ('\0\0\0\x01' '\0\0\0\x02' diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -386,11 +386,11 @@ lltype.free(pendian, flavor='raw') test("\x61\x00\x62\x00\x63\x00\x64\x00", -1) - - test("\x61\x00\x62\x00\x63\x00\x64\x00", None) - + if sys.byteorder == 'big': + test("\x00\x61\x00\x62\x00\x63\x00\x64", None) + else: + test("\x61\x00\x62\x00\x63\x00\x64\x00", None) test("\x00\x61\x00\x62\x00\x63\x00\x64", 1) - test("\xFE\xFF\x00\x61\x00\x62\x00\x63\x00\x64", 0, 1) test("\xFF\xFE\x61\x00\x62\x00\x63\x00\x64\x00", 0, -1) From pypy.commits at gmail.com Fri Jan 22 03:36:53 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 22 Jan 2016 00:36:53 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: the same for the UTF32 test Message-ID: <56a1ea25.284cc20a.b44fb.ffffb77a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81906:9df502324ca5 Date: 2016-01-22 09:36 +0100 http://bitbucket.org/pypy/pypy/changeset/9df502324ca5/ Log: the same for the UTF32 test diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -423,7 +423,10 @@ test("\x61\x00\x00\x00\x62\x00\x00\x00", -1) - test("\x61\x00\x00\x00\x62\x00\x00\x00", None) + if sys.byteorder == 'big': + test("\x00\x00\x00\x61\x00\x00\x00\x62", None) + else: + test("\x61\x00\x00\x00\x62\x00\x00\x00", None) test("\x00\x00\x00\x61\x00\x00\x00\x62", 1) From pypy.commits at gmail.com Fri Jan 22 04:00:18 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 22 Jan 2016 01:00:18 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: endian test issue marshal, test decoded value using platform endianess (not desired for marshall module) Message-ID: <56a1efa2.e935c20a.8d22c.ffffc1ca@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81907:359e329036d5 Date: 2016-01-22 09:59 +0100 http://bitbucket.org/pypy/pypy/changeset/359e329036d5/ Log: endian test issue marshal, test decoded value using platform endianess (not desired for marshall module) diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -64,14 +64,17 @@ import marshal, struct class FakeM: + # NOTE: marshal is platform independent, running this test must assume + # that seen gets values from the endianess of the marshal module. + # (which is little endian!) def __init__(self): self.seen = [] def start(self, code): self.seen.append(code) def put_int(self, value): - self.seen.append(struct.pack("i", value)) + self.seen.append(struct.pack(" Author: Richard Plangger Branch: s390x-backend Changeset: r81908:6b28800745be Date: 2016-01-22 10:21 +0100 http://bitbucket.org/pypy/pypy/changeset/6b28800745be/ Log: numpy dtype fixes in the test suite diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -65,7 +65,7 @@ class FakeM: # NOTE: marshal is platform independent, running this test must assume - # that seen gets values from the endianess of the marshal module. + # that self.seen gets values from the endianess of the marshal module. # (which is little endian!) def __init__(self): self.seen = [] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -351,7 +351,10 @@ assert np.dtype(xyz).name == 'xyz' # another obscure API, used in numpy record.py a = np.dtype((xyz, [('x', 'int32'), ('y', 'float32')])) - assert "[('x', 'i4'), ('y', '>f4')]" in repr(a) + else: + assert "[('x', 'i4" + E = '<' if sys.byteorder == 'little' else '>' + b = np.dtype((xyz, [("col1", E+"i4"), ("col2", E+"i4"), ("col3", E+"i4")])) data = [(1, 2,3), (4, 5, 6)] a = np.array(data, dtype=b) x = pickle.loads(pickle.dumps(a)) @@ -423,18 +429,20 @@ assert hash(t5) != hash(t6) def test_pickle(self): + import sys import numpy as np from numpy import array, dtype from cPickle import loads, dumps a = array([1,2,3]) + E = '<' if sys.byteorder == 'little' else '>' if self.ptr_size == 8: - assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, E, None, None, None, -1, -1, 0)) else: - assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, E, None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype assert np.dtype('bool').__reduce__() == (dtype, ('b1', 0, 1), (3, '|', None, None, None, -1, -1, 0)) assert np.dtype('|V16').__reduce__() == (dtype, ('V16', 0, 1), (3, '|', None, None, None, 16, 1, 0)) - assert np.dtype(('' d = np.dtype('f8') d.__setstate__((3, '|', (np.dtype('float64'), (2,)), None, None, 20, 1, 0)) assert d.str == ('<' if sys.byteorder == 'little' else '>') + 'f8' @@ -1201,7 +1210,7 @@ assert d.shape == (2,) assert d.itemsize == 8 assert d.subdtype is not None - assert repr(d) == "dtype(('' + assert str(dt) == "{'names':['f0','f1'], 'formats':['%si4','u1'], 'offsets':[0,4], 'itemsize':8, 'aligned':True}" % E dt = np.dtype([('f1', 'u1'), ('f0', 'i4')], align=True) - assert str(dt) == "{'names':['f1','f0'], 'formats':['u1',' Author: Richard Plangger Branch: s390x-backend Changeset: r81909:272b467ba7dd Date: 2016-01-22 10:24 +0100 http://bitbucket.org/pypy/pypy/changeset/272b467ba7dd/ Log: two more dtype tests fixed (endian issues) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -924,6 +924,7 @@ def test_dtype_str(self): from numpy import dtype + import sys byteorder = self.native_prefix assert dtype('i8').str == byteorder + 'i8' assert dtype('')+'U7' assert dtype([('', 'f8')]).str == "|V8" assert dtype(('f8', 2)).str == "|V16" @@ -976,8 +978,12 @@ def test_isnative(self): from numpy import dtype + import sys assert dtype('i4').isnative == True - assert dtype('>i8').isnative == False + if sys.byteorder == 'big': + assert dtype('i8').isnative == False def test_any_all_nonzero(self): import numpy From pypy.commits at gmail.com Fri Jan 22 04:56:59 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 22 Jan 2016 01:56:59 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: half way through the ndarray tests (endian issues) Message-ID: <56a1fceb.2aacc20a.4b24d.ffffd6c6@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81910:d1a60e575946 Date: 2016-01-22 10:56 +0100 http://bitbucket.org/pypy/pypy/changeset/d1a60e575946/ Log: half way through the ndarray tests (endian issues) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1791,6 +1791,7 @@ def test_scalar_view(self): from numpy import array + import sys a = array(3, dtype='int32') b = a.view(dtype='float32') assert b.shape == () @@ -1799,17 +1800,27 @@ assert exc.value[0] == "new type not compatible with array." exc = raises(TypeError, a.view, 'string') assert exc.value[0] == "data-type must not be 0-sized" - assert a.view('S4') == '\x03' + if sys.byteorder == 'big': + assert a.view('S4') == '\x00\x00\x00\x03' + else: + assert a.view('S4') == '\x03' a = array('abc1', dtype='c') assert (a == ['a', 'b', 'c', '1']).all() assert a.view('S4') == 'abc1' b = a.view([('a', 'i2'), ('b', 'i2')]) assert b.shape == (1,) - assert b[0][0] == 25185 - assert b[0][1] == 12643 + if sys.byteorder == 'big': + assert b[0][0] == 0x6162 + assert b[0][1] == 0x6331 + else: + assert b[0][0] == 25185 + assert b[0][1] == 12643 a = array([(1, 2)], dtype=[('a', 'int64'), ('b', 'int64')])[0] assert a.shape == () - assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02' + if sys.byteorder == 'big': + assert a.view('S16') == '\x00' * 7 + '\x01' + '\x00' * 7 + '\x02' + else: + assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02' a = array(2, dtype=' Author: Richard Plangger Branch: s390x-backend Changeset: r81911:47a85e21bb1b Date: 2016-01-22 12:00 +0100 http://bitbucket.org/pypy/pypy/changeset/47a85e21bb1b/ Log: and the other part of the bigendian issues (micronumpy tests) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3500,7 +3500,11 @@ BaseNumpyAppTest.setup_class.im_func(cls) cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4)) cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3)) - cls.w_float16val = cls.space.wrap('\x00E') # 5.0 in float16 + import sys + if sys.byteorder == 'big': + cls.w_float16val = cls.space.wrap('E\x00') # 5.0 in float16 + else: + cls.w_float16val = cls.space.wrap('\x00E') # 5.0 in float16 cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) @@ -3608,9 +3612,15 @@ assert (t == []).all() u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int) if sys.maxint > 2 ** 31 - 1: - assert (u == [1]).all() + if sys.byteorder == 'big': + assert (u == [0x0100000000000000]).all() + else: + assert (u == [1]).all() else: - assert (u == [1, 0]).all() + if sys.byteorder == 'big': + assert (u == [0x01000000, 0]).all() + else: + assert (u == [1, 0]).all() v = fromstring("abcd", dtype="|S2") assert v[0] == "ab" assert v[1] == "cd" @@ -3667,9 +3677,15 @@ k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) dt = array([5], dtype='longfloat').dtype + print(dt.itemsize) if dt.itemsize == 8: - m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', - dtype='float64') + import sys + if sys.byteorder == 'big': + m = fromstring('@\x14\x00\x00\x00\x00\x00\x00', + dtype='float64') + else: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') @@ -3691,8 +3707,13 @@ def test_tostring(self): from numpy import array - assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' - assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + import sys + if sys.byteorder == 'big': + assert array([1, 2, 3], 'i2').tostring() == '\x00\x01\x00\x02\x00\x03' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' + else: + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' @@ -4188,7 +4209,10 @@ v = a.view(('float32', 4)) assert v.dtype == np.dtype('float32') assert v.shape == (10, 4) - assert v[0][-1] == 2.53125 + if sys.byteorder == 'big': + assert v[0][-2] == 2.53125 + else: + assert v[0][-1] == 2.53125 exc = raises(ValueError, "a.view(('float32', 2))") assert exc.value[0] == 'new type not compatible with array.' diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -109,6 +109,7 @@ def test_pickle(self): from numpy import dtype, zeros + import sys try: from numpy.core.multiarray import scalar except ImportError: @@ -119,9 +120,11 @@ f = dtype('float64').type(13.37) c = dtype('complex128').type(13 + 37.j) - assert i.__reduce__() == (scalar, (dtype('int32'), '9\x05\x00\x00')) - assert f.__reduce__() == (scalar, (dtype('float64'), '=\n\xd7\xa3p\xbd*@')) - assert c.__reduce__() == (scalar, (dtype('complex128'), '\x00\x00\x00\x00\x00\x00*@\x00\x00\x00\x00\x00\x80B@')) + swap = lambda s: (''.join(reversed(s))) if sys.byteorder == 'big' else s + assert i.__reduce__() == (scalar, (dtype('int32'), swap('9\x05\x00\x00'))) + assert f.__reduce__() == (scalar, (dtype('float64'), swap('=\n\xd7\xa3p\xbd*@'))) + assert c.__reduce__() == (scalar, (dtype('complex128'), swap('\x00\x00\x00\x00\x00\x00*@') + \ + swap('\x00\x00\x00\x00\x00\x80B@'))) assert loads(dumps(i)) == i assert loads(dumps(f)) == f @@ -256,13 +259,20 @@ assert t < 7e-323 t = s.view('complex64') assert type(t) is np.complex64 - assert 0 < t.real < 1 - assert t.imag == 0 + if sys.byteorder == 'big': + assert 0 < t.imag < 1 + assert t.real == 0 + else: + assert 0 < t.real < 1 + assert t.imag == 0 exc = raises(TypeError, s.view, 'string') assert exc.value[0] == "data-type must not be 0-sized" t = s.view('S8') assert type(t) is np.string_ - assert t == '\x0c' + if sys.byteorder == 'big': + assert t == '\x00' * 7 + '\x0c' + else: + assert t == '\x0c' s = np.dtype('string').type('abc1') assert s.view('S4') == 'abc1' if '__pypy__' in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -327,10 +327,15 @@ # tests from numpy/core/tests/test_regression.py def test_sort_bigendian(self): from numpy import array, dtype - a = array(range(11), dtype='float64') - c = a.astype(dtype('' D.__module__ = 'mod' mod = new.module('mod') mod.D = D @@ -510,7 +511,7 @@ tp9 Rp10 (I3 - S'<' + S'{E}' p11 NNNI-1 I-1 @@ -520,7 +521,7 @@ S'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@' p13 tp14 - b.'''.replace(' ','') + b.'''.replace(' ','').format(E=E) for ss,sn in zip(s.split('\n')[1:],s_from_numpy.split('\n')[1:]): if len(ss)>10: # ignore binary data, it will be checked later From pypy.commits at gmail.com Fri Jan 22 06:47:32 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Jan 2016 03:47:32 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56a216d4.cf0b1c0a.fc7b4.ffffdb3b@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r694:50a0e13fff46 Date: 2016-01-22 12:47 +0100 http://bitbucket.org/pypy/pypy.org/changeset/50a0e13fff46/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $62736 of $105000 (59.7%) + $62755 of $105000 (59.8%)
      @@ -23,7 +23,7 @@
    • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30344 of $80000 (37.9%) + $30354 of $80000 (37.9%)
      @@ -25,7 +25,7 @@
    • From pypy.commits at gmail.com Fri Jan 22 06:52:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 22 Jan 2016 03:52:44 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed callsite of clibffi with the same big endian issues as found yesterday evening Message-ID: <56a2180c.6918c20a.261a0.fffffecf@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81912:6840459f9b22 Date: 2016-01-22 12:51 +0100 http://bitbucket.org/pypy/pypy/changeset/6840459f9b22/ Log: fixed callsite of clibffi with the same big endian issues as found yesterday evening diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -4209,6 +4209,7 @@ v = a.view(('float32', 4)) assert v.dtype == np.dtype('float32') assert v.shape == (10, 4) + import sys if sys.byteorder == 'big': assert v[0][-2] == 2.53125 else: diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -597,6 +597,9 @@ size = adjust_return_size(intmask(restype.c_size)) self.ll_result = lltype.malloc(rffi.VOIDP.TO, size, flavor='raw') + self.restype_size = intmask(restype.c_size) + else: + self.restype_size = -1 def push_arg(self, value): #if self.pushed_args == self.argnum: @@ -633,7 +636,12 @@ rffi.cast(VOIDPP, self.ll_args)) if RES_TP is not lltype.Void: TP = lltype.Ptr(rffi.CArray(RES_TP)) - res = rffi.cast(TP, self.ll_result)[0] + ptr = self.ll_result + if _BIG_ENDIAN and self.restype_size != -1: + # we get a 8 byte value in big endian + n = rffi.sizeof(lltype.Signed) - self.restype_size + ptr = rffi.ptradd(ptr, n) + res = rffi.cast(TP, ptr)[0] else: res = None self._clean_args() diff --git a/rpython/rlib/rstruct/test/test_runpack.py b/rpython/rlib/rstruct/test/test_runpack.py --- a/rpython/rlib/rstruct/test/test_runpack.py +++ b/rpython/rlib/rstruct/test/test_runpack.py @@ -6,11 +6,13 @@ class TestRStruct(BaseRtypingTest): def test_unpack(self): + import sys pad = '\x00' * (LONG_BIT//8-1) # 3 or 7 null bytes def fn(): return runpack('sll', 'a'+pad+'\x03'+pad+'\x04'+pad)[1] - assert fn() == 3 - assert self.interpret(fn, []) == 3 + result = 3 if sys.byteorder == 'little' else 3 << (LONG_BIT-8) + assert fn() == result + assert self.interpret(fn, []) == result def test_unpack_2(self): data = struct.pack('iiii', 0, 1, 2, 4) diff --git a/rpython/rlib/test/test_clibffi.py b/rpython/rlib/test/test_clibffi.py --- a/rpython/rlib/test/test_clibffi.py +++ b/rpython/rlib/test/test_clibffi.py @@ -181,11 +181,12 @@ p_a2 = rffi.cast(rffi.VOIDPP, ll_args[1])[0] a1 = rffi.cast(rffi.INTP, p_a1)[0] a2 = rffi.cast(rffi.INTP, p_a2)[0] - res = rffi.cast(rffi.INTP, ll_res) + res = rffi.cast(rffi.SIGNEDP, ll_res) + # must store a full ffi arg! if a1 > a2: - res[0] = rffi.cast(rffi.INT, 1) + res[0] = 1 else: - res[0] = rffi.cast(rffi.INT, -1) + res[0] = -1 ptr = CallbackFuncPtr([ffi_type_pointer, ffi_type_pointer], ffi_type_sint, callback) From pypy.commits at gmail.com Fri Jan 22 08:29:04 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 22 Jan 2016 05:29:04 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: we can have is_recursive but no get_unique_id Message-ID: <56a22ea0.a3f6c20a.6894d.1d0c@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81913:9fcd011d9607 Date: 2016-01-22 14:28 +0100 http://bitbucket.org/pypy/pypy/changeset/9fcd011d9607/ Log: we can have is_recursive but no get_unique_id diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -623,8 +623,8 @@ raise AttributeError("no 'greens' or 'reds' supplied") if virtualizables is not None: self.virtualizables = virtualizables - if get_unique_id is not None or is_recursive: - assert get_unique_id is not None and is_recursive, "get_unique_id and is_recursive must be specified at the same time" + if get_unique_id is not None: + assert is_recursive, "get_unique_id and is_recursive must be specified at the same time" for v in self.virtualizables: assert v in self.reds # if reds are automatic, they won't be passed to jit_merge_point, so From pypy.commits at gmail.com Fri Jan 22 08:57:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 22 Jan 2016 05:57:11 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: macros (e.g. WCOREDUMP) got parameter type Signed, on little endian this does not make a difference, but it does on big endian. changed to rffi.INT Message-ID: <56a23537.11181c0a.689f.06de@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81914:43db866bfbb3 Date: 2016-01-22 14:56 +0100 http://bitbucket.org/pypy/pypy/changeset/43db866bfbb3/ Log: macros (e.g. WCOREDUMP) got parameter type Signed, on little endian this does not make a difference, but it does on big endian. changed to rffi.INT diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -871,7 +871,7 @@ lltype.free(status_p, flavor='raw') def _make_waitmacro(name): - c_func = external(name, [lltype.Signed], lltype.Signed, + c_func = external(name, [rffi.INT], lltype.Signed, macro=_MACRO_ON_POSIX) returning_int = name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG') From pypy.commits at gmail.com Fri Jan 22 09:13:09 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 22 Jan 2016 06:13:09 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fix that test Message-ID: <56a238f5.e935c20a.8d22c.334b@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81915:900de81cdc5e Date: 2016-01-22 15:11 +0100 http://bitbucket.org/pypy/pypy/changeset/900de81cdc5e/ Log: fix that test diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -1312,7 +1312,7 @@ return (code + 1) * 2 driver = JitDriver(greens=["pc", "code"], reds='auto', - get_unique_id=get_unique_id) + get_unique_id=get_unique_id, is_recursive=True) def f(pc, code): i = 0 From pypy.commits at gmail.com Fri Jan 22 09:13:10 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 22 Jan 2016 06:13:10 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: import os x related hacks for threads & vmprof Message-ID: <56a238f6.44e21c0a.7de24.10a7@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81916:670f46cb2434 Date: 2016-01-22 15:12 +0100 http://bitbucket.org/pypy/pypy/changeset/670f46cb2434/ Log: import os x related hacks for threads & vmprof diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h new file mode 100644 --- /dev/null +++ b/rpython/rlib/rvmprof/src/vmprof_common.h @@ -0,0 +1,71 @@ +#include + +#define MAX_FUNC_NAME 1024 + +static int profile_file = -1; +static long prepare_interval_usec = 0; +static long profile_interval_usec = 0; +static int opened_profile(char *interp_name); + +#define MAX_STACK_DEPTH \ + ((SINGLE_BUF_SIZE - sizeof(struct prof_stacktrace_s)) / sizeof(void *)) + +#define MARKER_STACKTRACE '\x01' +#define MARKER_VIRTUAL_IP '\x02' +#define MARKER_TRAILER '\x03' +#define MARKER_INTERP_NAME '\x04' /* deprecated */ +#define MARKER_HEADER '\x05' + +#define VERSION_BASE '\x00' +#define VERSION_THREAD_ID '\x01' + +typedef struct prof_stacktrace_s { + char padding[sizeof(long) - 1]; + char marker; + long count, depth; + void *stack[]; +} prof_stacktrace_s; + + +RPY_EXTERN +char *vmprof_init(int fd, double interval, char *interp_name) +{ + if (interval < 1e-6 || interval >= 1.0) + return "bad value for 'interval'"; + prepare_interval_usec = (int)(interval * 1000000.0); + + if (prepare_concurrent_bufs() < 0) + return "out of memory"; + + assert(fd >= 0); + profile_file = fd; + if (opened_profile(interp_name) < 0) { + profile_file = -1; + return strerror(errno); + } + return NULL; +} + +static int _write_all(const char *buf, size_t bufsize); + +static int opened_profile(char *interp_name) +{ + struct { + long hdr[5]; + char interp_name[259]; + } header; + + size_t namelen = strnlen(interp_name, 255); + + header.hdr[0] = 0; + header.hdr[1] = 3; + header.hdr[2] = 0; + header.hdr[3] = prepare_interval_usec; + header.hdr[4] = 0; + header.interp_name[0] = MARKER_HEADER; + header.interp_name[1] = '\x00'; + header.interp_name[2] = VERSION_THREAD_ID; + header.interp_name[3] = namelen; + memcpy(&header.interp_name[4], interp_name, namelen); + return _write_all((char*)&header, 5 * sizeof(long) + 4 + namelen); +} diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -35,11 +35,12 @@ #include "vmprof_getpc.h" #include "vmprof_mt.h" #include "vmprof_stack.h" +#include "vmprof_common.h" /************************************************************/ -static int profile_file = -1; static long prepare_interval_usec; +static long saved_profile_file; static struct profbuf_s *volatile current_codes; static void *(*mainloop_get_virtual_ip)(char *) = 0; @@ -47,26 +48,6 @@ static void flush_codes(void); - -RPY_EXTERN -char *vmprof_init(int fd, double interval, char *interp_name) -{ - if (interval < 1e-6 || interval >= 1.0) - return "bad value for 'interval'"; - prepare_interval_usec = (int)(interval * 1000000.0); - - if (prepare_concurrent_bufs() < 0) - return "out of memory"; - - assert(fd >= 0); - profile_file = fd; - if (opened_profile(interp_name) < 0) { - profile_file = -1; - return strerror(errno); - } - return NULL; -} - /************************************************************/ /* value: last bit is 1 if signals must be ignored; all other bits @@ -94,28 +75,6 @@ * ************************************************************* */ -#define MAX_FUNC_NAME 128 -#define MAX_STACK_DEPTH \ - ((SINGLE_BUF_SIZE - sizeof(struct prof_stacktrace_s)) / sizeof(void *)) - -#define MARKER_STACKTRACE '\x01' -#define MARKER_VIRTUAL_IP '\x02' -#define MARKER_TRAILER '\x03' -#define MARKER_INTERP_NAME '\x04' /* deprecated */ -#define MARKER_HEADER '\x05' - -#define VERSION_BASE '\x00' -#define VERSION_THREAD_ID '\x01' -#define VERSION_TAG '\x02' - -struct prof_stacktrace_s { - char padding[sizeof(long) - 1]; - char marker; - long count, depth; - intptr_t stack[]; -}; - -static long profile_interval_usec = 0; static char atfork_hook_installed = 0; @@ -194,8 +153,43 @@ * ************************************************************* */ +#include + +volatile int spinlock; +jmp_buf restore_point; + +static void segfault_handler(int arg) +{ + longjmp(restore_point, SIGSEGV); +} + static void sigprof_handler(int sig_nr, siginfo_t* info, void *ucontext) { +#ifdef __APPLE__ + // TERRIBLE HACK AHEAD + // on OS X, the thread local storage is sometimes uninitialized + // when the signal handler runs - it means it's impossible to read errno + // or call any syscall or read PyThread_Current or pthread_self. Additionally, + // it seems impossible to read the register gs. + // here we register segfault handler (all guarded by a spinlock) and call + // longjmp in case segfault happens while reading a thread local + while (__sync_lock_test_and_set(&spinlock, 1)) { + } + signal(SIGSEGV, &segfault_handler); + int fault_code = setjmp(restore_point); + if (fault_code == 0) { + pthread_self(); + get_current_thread_id(); + } else { + signal(SIGSEGV, SIG_DFL); + __sync_synchronize(); + spinlock = 0; + return; + } + signal(SIGSEGV, SIG_DFL); + __sync_synchronize(); + spinlock = 0; +#endif long val = __sync_fetch_and_add(&signal_handler_value, 2L); if ((val & 1) == 0) { @@ -212,10 +206,8 @@ struct prof_stacktrace_s *st = (struct prof_stacktrace_s *)p->data; st->marker = MARKER_STACKTRACE; st->count = 1; - //st->stack[0] = GetPC((ucontext_t*)ucontext); depth = get_stack_trace(st->stack, MAX_STACK_DEPTH-2, GetPC((ucontext_t*)ucontext), ucontext); - //depth++; // To account for pc value in stack[0]; st->depth = depth; st->stack[depth++] = get_current_thread_id(); p->data_offset = offsetof(struct prof_stacktrace_s, marker); @@ -280,12 +272,15 @@ static void atfork_disable_timer(void) { if (profile_interval_usec > 0) { + saved_profile_file = profile_file; + profile_file = -1; remove_sigprof_timer(); } } static void atfork_enable_timer(void) { if (profile_interval_usec > 0) { + profile_file = saved_profile_file; install_sigprof_timer(); } } @@ -332,7 +327,7 @@ return -1; } -static int _write_all(const void *buf, size_t bufsize) +static int _write_all(const char *buf, size_t bufsize) { while (bufsize > 0) { ssize_t count = write(profile_file, buf, bufsize); @@ -344,29 +339,6 @@ return 0; } -static int opened_profile(char *interp_name) -{ - struct { - long hdr[5]; - char interp_name[259]; - } header; - - size_t namelen = strnlen(interp_name, 255); - current_codes = NULL; - - header.hdr[0] = 0; - header.hdr[1] = 3; - header.hdr[2] = 0; - header.hdr[3] = prepare_interval_usec; - header.hdr[4] = 0; - header.interp_name[0] = MARKER_HEADER; - header.interp_name[1] = '\x00'; - header.interp_name[2] = VERSION_TAG; - header.interp_name[3] = namelen; - memcpy(&header.interp_name[4], interp_name, namelen); - return _write_all(&header, 5 * sizeof(long) + 4 + namelen); -} - static int close_profile(void) { unsigned char marker = MARKER_TRAILER; @@ -404,6 +376,9 @@ struct profbuf_s *p; char *t; + if (profile_file == -1) + return 0; // silently don't write it + retry: p = current_codes; if (p != NULL) { @@ -411,7 +386,7 @@ /* grabbed 'current_codes': we will append the current block to it if it contains enough room */ size_t freesize = SINGLE_BUF_SIZE - p->data_size; - if (freesize < blocklen) { + if (freesize < (size_t)blocklen) { /* full: flush it */ commit_buffer(profile_file, p); p = NULL; From pypy.commits at gmail.com Fri Jan 22 09:19:34 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 22 Jan 2016 06:19:34 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: change pdb to some asserts Message-ID: <56a23a76.8673c20a.7e860.3255@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81917:ffec42c76f43 Date: 2016-01-22 15:18 +0100 http://bitbucket.org/pypy/pypy/changeset/ffec42c76f43/ Log: change pdb to some asserts diff --git a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py @@ -74,8 +74,8 @@ tmpfile = str(udir.join('test_rvmprof')) stats = read_profile(tmpfile) t = stats.get_tree() - import pdb - pdb.set_trace() + assert t.name == 'py:x:foo:3' + assert len(t.children) == 1 # jit self.meta_interp(f, [1000000], inline=True) try: From pypy.commits at gmail.com Fri Jan 22 10:46:17 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Jan 2016 07:46:17 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: Fix for some tests: don't use .offset directly, call .getoffset() Message-ID: <56a24ec9.cdb81c0a.8c988.2a4c@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81918:fba43bc13dda Date: 2016-01-22 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/fba43bc13dda/ Log: Fix for some tests: don't use .offset directly, call .getoffset() diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -857,7 +857,8 @@ # eax = address in the stack of a 3-words struct vmprof_stack_s self.mc.LEA_rs(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) # old = current value of vmprof_tl_stack - self.mc.MOV_rm(old.value, (tloc.value, cintf.vmprof_tl_stack.offset)) + offset = cintf.vmprof_tl_stack.getoffset() + self.mc.MOV_rm(old.value, (tloc.value, offset)) # eax->next = old self.mc.MOV_mr((eax.value, 0), old.value) # eax->value = my esp @@ -865,7 +866,7 @@ # eax->kind = VMPROF_JITTED_TAG self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG) # save in vmprof_tl_stack the new eax - self.mc.MOV_mr((tloc.value, cintf.vmprof_tl_stack.offset), eax.value) + self.mc.MOV_mr((tloc.value, offset), eax.value) def _call_footer_vmprof(self): from rpython.rlib.rvmprof.rvmprof import cintf @@ -874,7 +875,8 @@ # eax = (our local vmprof_tl_stack).next self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD) # save in vmprof_tl_stack the value eax - self.mc.MOV_mr((edx.value, cintf.vmprof_tl_stack.offset), eax.value) + offset = cintf.vmprof_tl_stack.getoffset() + self.mc.MOV_mr((edx.value, offset), eax.value) def _call_header(self): self.mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -308,7 +308,7 @@ offset = CDefinedIntSymbolic('RPY_TLOFS_%s' % self.fieldname, default='?') offset.loop_invariant = loop_invariant - self.offset = offset + self._offset = offset def getraw(): if we_are_translated(): @@ -364,7 +364,7 @@ ThreadLocalField.__init__(self, lltype.Signed, 'tlref%d' % unique_id, loop_invariant=loop_invariant) setraw = self.setraw - offset = self.offset + offset = self._offset def get(): if we_are_translated(): From pypy.commits at gmail.com Fri Jan 22 11:32:27 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 22 Jan 2016 08:32:27 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fix Message-ID: <56a2599b.e906c20a.3af87.5cf5@mx.google.com> Author: Armin Rigo Branch: vmprof-newstack Changeset: r81919:e45af82e0252 Date: 2016-01-22 17:31 +0100 http://bitbucket.org/pypy/pypy/changeset/e45af82e0252/ Log: fix diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1332,7 +1332,7 @@ tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', loop_invariant=loop_inv) OS_THREADLOCALREF_GET = effectinfo.EffectInfo.OS_THREADLOCALREF_GET - c = const(tlfield.offset) + c = const(tlfield.getoffset()) v = varoftype(lltype.Signed) op = SpaceOperation('threadlocalref_get', [c], v) cc = FakeBuiltinCallControl() From pypy.commits at gmail.com Fri Jan 22 11:46:06 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 22 Jan 2016 08:46:06 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: fix the test Message-ID: <56a25cce.a867c20a.41779.660b@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81920:b5543f370125 Date: 2016-01-22 17:33 +0100 http://bitbucket.org/pypy/pypy/changeset/b5543f370125/ Log: fix the test diff --git a/rpython/jit/metainterp/test/test_jitdriver.py b/rpython/jit/metainterp/test/test_jitdriver.py --- a/rpython/jit/metainterp/test/test_jitdriver.py +++ b/rpython/jit/metainterp/test/test_jitdriver.py @@ -193,7 +193,7 @@ return pc + 1 driver = JitDriver(greens=["pc"], reds='auto', - get_unique_id=get_unique_id) + get_unique_id=get_unique_id, is_recursive=True) def f(arg): i = 0 From pypy.commits at gmail.com Fri Jan 22 11:46:08 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 22 Jan 2016 08:46:08 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: merge Message-ID: <56a25cd0.e906c20a.3af87.61f5@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81921:4b79f234a111 Date: 2016-01-22 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/4b79f234a111/ Log: merge diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1332,7 +1332,7 @@ tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', loop_invariant=loop_inv) OS_THREADLOCALREF_GET = effectinfo.EffectInfo.OS_THREADLOCALREF_GET - c = const(tlfield.offset) + c = const(tlfield.getoffset()) v = varoftype(lltype.Signed) op = SpaceOperation('threadlocalref_get', [c], v) cc = FakeBuiltinCallControl() From pypy.commits at gmail.com Fri Jan 22 12:04:55 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 22 Jan 2016 09:04:55 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added comment to my last commit Message-ID: <56a26137.a151c20a.34a75.733a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81922:dcf9b353608e Date: 2016-01-22 18:03 +0100 http://bitbucket.org/pypy/pypy/changeset/dcf9b353608e/ Log: added comment to my last commit diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -871,6 +871,9 @@ lltype.free(status_p, flavor='raw') def _make_waitmacro(name): + # note that rffi.INT as first parameter type is intentional. + # on s390x providing a lltype.Signed as param type, the + # macro wrapper function will always return 0 c_func = external(name, [rffi.INT], lltype.Signed, macro=_MACRO_ON_POSIX) returning_int = name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG') From pypy.commits at gmail.com Sat Jan 23 01:51:47 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 22 Jan 2016 22:51:47 -0800 (PST) Subject: [pypy-commit] pypy value-profiling: fix interaction of known class lists and cpyext Message-ID: <56a32303.2467c20a.95f27.30c0@mx.google.com> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r81923:f66ef9ad7656 Date: 2016-01-22 17:47 +0100 http://bitbucket.org/pypy/pypy/changeset/f66ef9ad7656/ Log: fix interaction of known class lists and cpyext diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -88,6 +88,10 @@ return space.fromcache(EmptyListStrategy) w_firstobj = list_w[0] + if w_firstobj is None: + # this is done by cpyext: + return space.fromcache(ObjectListStrategy) + check_int_or_float = False if type(w_firstobj) is W_IntObject: diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -417,6 +417,13 @@ assert isinstance(w_lst.strategy, SizeListStrategy) assert w_lst.strategy.sizehint == 13 + def test_newlist_with_interplevel_None(self): + # needed for cpyext + space = self.space + w_lst = space.newlist([None] * 10) + assert w_lst.strategy._known_cls is None + + def test_find_fast_on_intlist(self, monkeypatch): monkeypatch.setattr(self.space, "eq_w", None) w = self.space.wrap From pypy.commits at gmail.com Sat Jan 23 08:22:11 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 23 Jan 2016 05:22:11 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: cargo-culting - maybe we should load esi first Message-ID: <56a37e83.c8b3c20a.8831c.ffff9cc7@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r81924:9da0fc568a35 Date: 2016-01-23 14:21 +0100 http://bitbucket.org/pypy/pypy/changeset/9da0fc568a35/ Log: cargo-culting - maybe we should load esi first diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -881,10 +881,11 @@ def _call_header(self): self.mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) self.mc.MOV_sr(PASS_ON_MY_FRAME * WORD, ebp.value) + if IS_X86_64: + self.mc.MOV_sr(THREADLOCAL_OFS, esi.value) if self.cpu.translate_support_code: self._call_header_vmprof() # on X86_64, this uses esi if IS_X86_64: - self.mc.MOV_sr(THREADLOCAL_OFS, esi.value) self.mc.MOV_rr(ebp.value, edi.value) else: self.mc.MOV_rs(ebp.value, (FRAME_FIXED_SIZE + 1) * WORD) From pypy.commits at gmail.com Sat Jan 23 17:35:21 2016 From: pypy.commits at gmail.com (sbauman) Date: Sat, 23 Jan 2016 14:35:21 -0800 (PST) Subject: [pypy-commit] pypy default: Actually remove GETFIELD_GC_PURE_* from the opcode space Message-ID: <56a40029.17941c0a.2b56c.247a@mx.google.com> Author: Spenser Andrew Bauman Branch: Changeset: r81925:ceefeb71a4eb Date: 2016-01-23 16:01 -0500 http://bitbucket.org/pypy/pypy/changeset/ceefeb71a4eb/ Log: Actually remove GETFIELD_GC_PURE_* from the opcode space diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1154,7 +1154,6 @@ 'ARRAYLEN_GC/1d/i', 'STRLEN/1/i', 'STRGETITEM/2/i', - 'GETFIELD_GC_PURE/1d/rfi', 'GETARRAYITEM_GC_PURE/2d/rfi', #'GETFIELD_RAW_PURE/1d/rfi', these two operations not useful and #'GETARRAYITEM_RAW_PURE/2d/fi', dangerous when unrolling speculatively From pypy.commits at gmail.com Sat Jan 23 17:35:23 2016 From: pypy.commits at gmail.com (sbauman) Date: Sat, 23 Jan 2016 14:35:23 -0800 (PST) Subject: [pypy-commit] pypy default: Actually remove all references to GETFIELD_GC_PURE_* opcode Message-ID: <56a4002b.c9ebc20a.f7f2d.4068@mx.google.com> Author: Spenser Andrew Bauman Branch: Changeset: r81926:013f2f131193 Date: 2016-01-23 17:33 -0500 http://bitbucket.org/pypy/pypy/changeset/013f2f131193/ Log: Actually remove all references to GETFIELD_GC_PURE_* opcode diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -234,7 +234,6 @@ self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, fieldsize, itemsize, ofs) elif op.getopnum() in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, - rop.GETFIELD_GC_PURE_I, rop.GETFIELD_GC_PURE_F, rop.GETFIELD_GC_PURE_R, rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) From pypy.commits at gmail.com Sun Jan 24 03:08:03 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Jan 2016 00:08:03 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56a48663.46bb1c0a.7b018.ffff89bb@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r695:13468fc54cb3 Date: 2016-01-24 09:07 +0100 http://bitbucket.org/pypy/pypy.org/changeset/13468fc54cb3/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $62755 of $105000 (59.8%) + $62765 of $105000 (59.8%)
      @@ -23,7 +23,7 @@
    • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $30354 of $80000 (37.9%) + $30379 of $80000 (38.0%)
      @@ -25,7 +25,7 @@
    • From pypy.commits at gmail.com Sun Jan 24 15:16:30 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 24 Jan 2016 12:16:30 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56a5311e.29cec20a.57b07.ffffcba4@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r696:43d71eb8c5e7 Date: 2016-01-24 21:16 +0100 http://bitbucket.org/pypy/pypy.org/changeset/43d71eb8c5e7/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $62765 of $105000 (59.8%) + $62793 of $105000 (59.8%)
      @@ -23,7 +23,7 @@
    • From pypy.commits at gmail.com Mon Jan 25 05:03:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Jan 2016 02:03:00 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed lldebug properties from test translation (left them for debugging), fixed a bug when Const arg is provided to force_allocate_reg, it could have consumed on of the forbidden_vars Message-ID: <56a5f2d4.0f811c0a.3314d.1f80@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81927:3430d1f200fa Date: 2016-01-25 11:02 +0100 http://bitbucket.org/pypy/pypy/changeset/3430d1f200fa/ Log: removed lldebug properties from test translation (left them for debugging), fixed a bug when Const arg is provided to force_allocate_reg, it could have consumed on of the forbidden_vars diff --git a/rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py b/rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py --- a/rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py +++ b/rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py @@ -26,7 +26,6 @@ t = TranslationContext() t.config.translation.gc = "minimark" t.config.translation.gcremovetypeptr = gcremovetypeptr - t.config.translation.lldebug = True ann = t.buildannotator() ann.build_types(main, [s_list_of_strings], main_entry_point=True) rtyper = t.buildrtyper() diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -84,7 +84,6 @@ # t = TranslationContext() t.config.translation.gc = gc - t.config.translation.lldebug = True # pretty useful when debugging assembly if gc != 'boehm': t.config.translation.gcremovetypeptr = True for name, value in kwds.items(): diff --git a/rpython/jit/backend/zarch/arch.py b/rpython/jit/backend/zarch/arch.py --- a/rpython/jit/backend/zarch/arch.py +++ b/rpython/jit/backend/zarch/arch.py @@ -29,12 +29,12 @@ # # -THREADLOCAL_BYTES = 8 +# THREADLOCAL_BYTES = 8 # in reverse order to SP STD_FRAME_SIZE_IN_BYTES = 160 -THREADLOCAL_ADDR_OFFSET = 8 +THREADLOCAL_ADDR_OFFSET = 16 # at position of r2, but r2 is never saved!! assert STD_FRAME_SIZE_IN_BYTES % 2 == 0 diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -199,7 +199,7 @@ # mc.STMG(r.r10, r.r12, l.addr(10*WORD, r.SP)) mc.STG(r.r2, l.addr(2*WORD, r.SP)) - mc.STD(r.f0, l.addr(3*WORD, r.SP)) # slot of r3 is not used here + mc.STD(r.f0, l.addr(16*WORD, r.SP)) saved_regs = None saved_fp_regs = None else: @@ -231,11 +231,11 @@ # Note: if not 'for_frame', argument_loc is r0, which must carefully # not be overwritten above mc.STG(r.SP, l.addr(0, r.SP)) # store the backchain - mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) + mc.push_std_frame() mc.load_imm(mc.RAW_CALL_REG, func) mc.LGR(r.r2, argument_loc) mc.raw_call() - mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) + mc.pop_std_frame() if for_frame: self._restore_exception(mc, RCS2, RCS3) @@ -251,7 +251,7 @@ if for_frame: mc.LMG(r.r10, r.r12, l.addr(10*WORD, r.SP)) mc.LG(r.r2, l.addr(2*WORD, r.SP)) - mc.LD(r.f0, l.addr(3*WORD, r.SP)) # slot of r3 is not used here + mc.LD(r.f0, l.addr(16*WORD, r.SP)) else: self._pop_core_regs_from_jitframe(mc, saved_regs) self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) @@ -516,13 +516,13 @@ # registers). mc = InstrBuilder() # - self._push_core_regs_to_jitframe(mc, [r.r14]) # store the link on the jit frame - # Do the call + # store the link backwards + self.mc.STMG(r.r14, r.r15, l.addr(14*WORD, r.SP)) mc.push_std_frame() + mc.LGR(r.r2, r.SP) mc.load_imm(mc.RAW_CALL_REG, slowpathaddr) mc.raw_call() - mc.pop_std_frame() # # Check if it raised StackOverflow mc.load_imm(r.SCRATCH, self.cpu.pos_exception()) @@ -531,9 +531,11 @@ # else we have an exception mc.cmp_op(r.SCRATCH, l.imm(0), imm=True) # - self._pop_core_regs_from_jitframe(mc, [r.r14]) # restore the link on the jit frame + size = STD_FRAME_SIZE_IN_BYTES + self.mc.LMG(r.r14, r.r15, l.addr(size+14*WORD, r.SP)) # restore the link # So we return to our caller, conditionally if "EQ" mc.BCR(c.EQ, r.r14) + mc.trap() # debug if this is EVER executed! # # Else, jump to propagate_exception_path assert self.propagate_exception_path @@ -565,6 +567,7 @@ jmp_pos = self.mc.currpos() self.mc.reserve_cond_jump() + mc.push_std_frame() mc.load_imm(r.r14, self.stack_check_slowpath) mc.BASR(r.r14, r.r14) @@ -1006,6 +1009,9 @@ # save r3, the second argument, to THREADLOCAL_ADDR_OFFSET self.mc.STG(r.r3, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) + # push a standard frame for any call + self.mc.push_std_frame() + # move the first argument to SPP: the jitframe object self.mc.LGR(r.SPP, r.r2) @@ -1049,30 +1055,10 @@ self._call_footer_shadowstack(gcrootmap) # restore registers r6-r15 - self.mc.LMG(r.r6, r.r15, l.addr(6*WORD, r.SP)) + size = STD_FRAME_SIZE_IN_BYTES + self.mc.LMG(r.r6, r.r15, l.addr(size+6*WORD, r.SP)) self.jmpto(r.r14) - def _push_all_regs_to_frame(self, mc, ignored_regs, withfloats, callee_only=False): - # Push all general purpose registers - base_ofs = self.cpu.get_baseofs_of_frame_field() - if callee_only: - regs = gpr_reg_mgr_cls.save_around_call_regs - else: - regs = gpr_reg_mgr_cls.all_regs - for gpr in regs: - if gpr not in ignored_regs: - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_br(v * WORD + base_ofs, gpr.value) - if withfloats: - if IS_X86_64: - coeff = 1 - else: - coeff = 2 - # Push all XMM regs - ofs = len(gpr_reg_mgr_cls.all_regs) - for i in range(len(xmm_reg_mgr_cls.all_regs)): - mc.MOVSD_bx((ofs + i * coeff) * WORD + base_ofs, i) - def _push_core_regs_to_jitframe(self, mc, includes=r.registers): self._multiple_to_or_from_jitframe(mc, includes, store=True) diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -62,7 +62,6 @@ # called function will in turn call further functions (which must be passed the # address of the new frame). This stack grows downwards from high addresses # """ - self.subtracted_to_sp = STD_FRAME_SIZE_IN_BYTES gpr_regs = 0 fpr_regs = 0 @@ -151,7 +150,8 @@ # save the SP back chain self.mc.STG(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) # move the frame pointer - self.mc.LAY(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) + if self.subtracted_to_sp != 0: + self.mc.LAY(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) self.mc.raw_call() # self.ensure_correct_signzero_extension() @@ -180,7 +180,8 @@ def restore_stack_pointer(self): # it must at LEAST be 160 bytes - self.mc.LAY(r.SP, l.addr(self.subtracted_to_sp, r.SP)) + if self.subtracted_to_sp != 0: + self.mc.LAY(r.SP, l.addr(self.subtracted_to_sp, r.SP)) def load_result(self): assert (self.resloc is None or diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -138,9 +138,9 @@ poolloc = l.pool(offset) if force_in_reg: if selected_reg is None: - tmp = TempVar() + tmp = TempInt() + selected_reg = self.force_allocate_reg(tmp, forbidden_vars=self.temp_boxes) self.temp_boxes.append(tmp) - selected_reg = self.force_allocate_reg(tmp) self.assembler.mc.LG(selected_reg, poolloc) return selected_reg return poolloc @@ -152,7 +152,7 @@ return loc def get_scratch_reg(self): - box = TempVar() + box = TempInt() reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes) self.temp_boxes.append(box) return reg @@ -465,7 +465,8 @@ # else, return a regular register (not SPP). if self.rm.reg_bindings.get(var, None) is not None: return self.rm.loc(var, must_exist=True) - return self.rm.force_allocate_reg(var) + forbidden_vars = self.rm.temp_boxes + return self.rm.force_allocate_reg(var, forbidden_vars) def walk_operations(self, inputargs, operations): from rpython.jit.backend.zarch.assembler import ( From pypy.commits at gmail.com Mon Jan 25 07:33:03 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Jan 2016 04:33:03 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: rewritten many calls to use one stack frame less Message-ID: <56a615ff.4c301c0a.1c7bf.5836@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81928:6a1b2984c003 Date: 2016-01-25 13:31 +0100 http://bitbucket.org/pypy/pypy/changeset/6a1b2984c003/ Log: rewritten many calls to use one stack frame less diff --git a/rpython/jit/backend/zarch/arch.py b/rpython/jit/backend/zarch/arch.py --- a/rpython/jit/backend/zarch/arch.py +++ b/rpython/jit/backend/zarch/arch.py @@ -34,7 +34,8 @@ # in reverse order to SP STD_FRAME_SIZE_IN_BYTES = 160 -THREADLOCAL_ADDR_OFFSET = 16 # at position of r2, but r2 is never saved!! +THREADLOCAL_ON_ENTER_JIT = 8 +THREADLOCAL_ADDR_OFFSET = STD_FRAME_SIZE_IN_BYTES + THREADLOCAL_ON_ENTER_JIT assert STD_FRAME_SIZE_IN_BYTES % 2 == 0 diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -16,7 +16,8 @@ from rpython.jit.backend.zarch.arch import (WORD, STD_FRAME_SIZE_IN_BYTES, THREADLOCAL_ADDR_OFFSET, RECOVERY_GCMAP_POOL_OFFSET, RECOVERY_TARGET_POOL_OFFSET, - JUMPABS_TARGET_ADDR__POOL_OFFSET, JUMPABS_POOL_ADDR_POOL_OFFSET) + JUMPABS_TARGET_ADDR__POOL_OFFSET, JUMPABS_POOL_ADDR_POOL_OFFSET, + THREADLOCAL_ON_ENTER_JIT) from rpython.jit.backend.zarch.opassembler import OpAssembler from rpython.jit.backend.zarch.regalloc import Regalloc from rpython.jit.codewriter.effectinfo import EffectInfo @@ -382,7 +383,7 @@ """ # signature of these cond_call_slowpath functions: # * on entry, r12 contains the function to call - # * r3, r4, r5, r6 contain arguments for the call + # * r2, r3, r4, r5 contain arguments for the call # * r0 is the gcmap # * the old value of these regs must already be stored in the jitframe # * on exit, all registers are restored from the jitframe @@ -391,6 +392,8 @@ self.mc = mc ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') mc.STG(r.SCRATCH2, l.addr(ofs2,r.SPP)) + mc.STMG(r.r14,r.r15,l.addr(14*WORD, r.SP)) + mc.push_std_frame() # copy registers to the frame, with the exception of r3 to r6 and r12, # because these have already been saved by the caller. Note that @@ -406,21 +409,21 @@ reg is not r.r4 and reg is not r.r5 and reg is not r.r12] - self._push_core_regs_to_jitframe(mc, regs + [r.r14]) + self._push_core_regs_to_jitframe(mc, regs) if supports_floats: self._push_fp_regs_to_jitframe(mc) # allocate a stack frame! - mc.push_std_frame() mc.raw_call(r.r12) - mc.pop_std_frame() # Finish self._reload_frame_if_necessary(mc) - self._pop_core_regs_from_jitframe(mc, saved_regs + [r.r14]) + self._pop_core_regs_from_jitframe(mc, saved_regs) if supports_floats: self._pop_fp_regs_from_jitframe(mc) + size = STD_FRAME_SIZE_IN_BYTES + mc.LMG(r.r14, r.r15, l.addr(size+14*WORD, r.SP)) mc.BCR(c.ANY, r.RETURN) self.mc = None return mc.materialize(self.cpu, []) @@ -446,8 +449,11 @@ mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) saved_regs = [reg for reg in r.MANAGED_REGS if reg is not r.RES and reg is not r.RSZ] - self._push_core_regs_to_jitframe(mc, saved_regs + [r.r14]) + self._push_core_regs_to_jitframe(mc, saved_regs) self._push_fp_regs_to_jitframe(mc) + # alloc a frame for the callee + mc.STMG(r.r14, r.r15, l.addr(14*WORD, r.SP)) + mc.push_std_frame() # if kind == 'fixed': addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() @@ -478,10 +484,8 @@ # Do the call addr = rffi.cast(lltype.Signed, addr) - mc.push_std_frame() mc.load_imm(mc.RAW_CALL_REG, addr) mc.raw_call() - mc.pop_std_frame() self._reload_frame_if_necessary(mc) @@ -490,7 +494,7 @@ # emit_call_malloc_gc()). self.propagate_memoryerror_if_r2_is_null() - self._pop_core_regs_from_jitframe(mc, saved_regs + [r.r14]) + self._pop_core_regs_from_jitframe(mc, saved_regs) self._pop_fp_regs_from_jitframe(mc) nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr() @@ -501,6 +505,8 @@ # r.RSZ is loaded from [SCRATCH], to make the caller's store a no-op here mc.load(r.RSZ, r.r1, 0) # + size = STD_FRAME_SIZE_IN_BYTES + mc.LMG(r.r14, r.r15, l.addr(size+14*WORD, r.SP)) mc.BCR(c.ANY, r.r14) self.mc = None return mc.materialize(self.cpu, []) @@ -517,7 +523,7 @@ mc = InstrBuilder() # # store the link backwards - self.mc.STMG(r.r14, r.r15, l.addr(14*WORD, r.SP)) + mc.STMG(r.r14, r.r15, l.addr(14*WORD, r.SP)) mc.push_std_frame() mc.LGR(r.r2, r.SP) @@ -532,7 +538,7 @@ mc.cmp_op(r.SCRATCH, l.imm(0), imm=True) # size = STD_FRAME_SIZE_IN_BYTES - self.mc.LMG(r.r14, r.r15, l.addr(size+14*WORD, r.SP)) # restore the link + mc.LMG(r.r14, r.r15, l.addr(size+14*WORD, r.SP)) # restore the link # So we return to our caller, conditionally if "EQ" mc.BCR(c.EQ, r.r14) mc.trap() # debug if this is EVER executed! @@ -590,11 +596,11 @@ # LGHI r0, ... (4 bytes) # sum -> (14 bytes) mc.write('\x00'*14) - self.mc.push_std_frame() + mc.push_std_frame() mc.load_imm(r.RETURN, self._frame_realloc_slowpath) self.load_gcmap(mc, r.r1, gcmap) mc.raw_call() - self.mc.pop_std_frame() + mc.pop_std_frame() self.frame_depth_to_patch.append((patch_pos, mc.currpos())) @@ -1006,8 +1012,8 @@ # save the back chain self.mc.STG(r.SP, l.addr(0, r.SP)) - # save r3, the second argument, to THREADLOCAL_ADDR_OFFSET - self.mc.STG(r.r3, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) + # save r3, the second argument, to the thread local position + self.mc.STG(r.r3, l.addr(THREADLOCAL_ON_ENTER_JIT, r.SP)) # push a standard frame for any call self.mc.push_std_frame() @@ -1418,9 +1424,7 @@ raise AssertionError(kind) # # call! - mc.push_std_frame() mc.branch_absolute(addr) - mc.pop_std_frame() jmp_location = mc.currpos() mc.reserve_cond_jump(short=True) # jump forward, patched later diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -62,6 +62,7 @@ # called function will in turn call further functions (which must be passed the # address of the new frame). This stack grows downwards from high addresses # """ + self.subtracted_to_sp = 0 gpr_regs = 0 fpr_regs = 0 @@ -83,18 +84,18 @@ stack_params.append(i) self.subtracted_to_sp += len(stack_params) * WORD - base = -len(stack_params) * WORD + base = len(stack_params) * WORD if self.is_call_release_gil: self.subtracted_to_sp += 8*WORD - base -= 8*WORD - # one additional owrd for remap frame layout + base += 8*WORD + # one additional word for remap frame layout # regalloc_push will overwrite -8(r.SP) and destroy # a parameter if we would not reserve that space - base -= WORD - self.subtracted_to_sp += WORD + # base += WORD + # TODO self.subtracted_to_sp += WORD for idx,i in enumerate(stack_params): loc = arglocs[i] - offset = base + 8 * idx + offset = STD_FRAME_SIZE_IN_BYTES - base + 8 * idx if loc.type == FLOAT: if loc.is_fp_reg(): src = loc @@ -148,15 +149,23 @@ def emit_raw_call(self): # always allocate a stack frame for the new function # save the SP back chain - self.mc.STG(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) + #self.mc.STG(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) # move the frame pointer if self.subtracted_to_sp != 0: self.mc.LAY(r.SP, l.addr(-self.subtracted_to_sp, r.SP)) self.mc.raw_call() + + + def restore_stack_pointer(self): + # it must at LEAST be 160 bytes + if self.subtracted_to_sp != 0: + self.mc.LAY(r.SP, l.addr(self.subtracted_to_sp, r.SP)) + + def load_result(self): + assert (self.resloc is None or + self.resloc is r.GPR_RETURN or + self.resloc is r.FPR_RETURN) # - self.ensure_correct_signzero_extension() - - def ensure_correct_signzero_extension(self): if self.restype == 'i' and self.ressize != WORD: # we must be sure! libffi (s390x impl) will not return # a sane 64 bit zero/sign extended value. fix for this @@ -177,25 +186,14 @@ else: assert 0, "cannot zero extend size %d" % self.ressize - - def restore_stack_pointer(self): - # it must at LEAST be 160 bytes - if self.subtracted_to_sp != 0: - self.mc.LAY(r.SP, l.addr(self.subtracted_to_sp, r.SP)) - - def load_result(self): - assert (self.resloc is None or - self.resloc is r.GPR_RETURN or - self.resloc is r.FPR_RETURN) - - def call_releasegil_addr_and_move_real_arguments(self, fastgil): assert self.is_call_release_gil RSHADOWOLD = self.RSHADOWOLD RSHADOWPTR = self.RSHADOWPTR RFASTGILPTR = self.RFASTGILPTR # - self.mc.STMG(r.r8, r.r13, l.addr(-7*WORD, r.SP)) + pos = STD_FRAME_SIZE_IN_BYTES - 7*WORD + self.mc.STMG(r.r8, r.r13, l.addr(pos, r.SP)) # 6 registers, 1 for a floating point return value! # registered by prepare_arguments! # @@ -268,26 +266,27 @@ PARAM_SAVE_AREA_OFFSET = 0 if reg is not None: # save 1 word below the stack pointer + pos = STD_FRAME_SIZE_IN_BYTES if reg.is_core_reg(): - self.mc.STG(reg, l.addr(-1*WORD, r.SP)) + self.mc.STG(reg, l.addr(pos-1*WORD, r.SP)) elif reg.is_fp_reg(): - self.mc.STD(reg, l.addr(-1*WORD, r.SP)) - self.mc.push_std_frame(8*WORD) + self.mc.STD(reg, l.addr(pos-1*WORD, r.SP)) self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr) self.mc.raw_call() - self.mc.pop_std_frame(8*WORD) if reg is not None: + pos = STD_FRAME_SIZE_IN_BYTES if reg.is_core_reg(): - self.mc.LG(reg, l.addr(-1*WORD, r.SP)) + self.mc.LG(reg, l.addr(pos-1*WORD, r.SP)) elif reg.is_fp_reg(): - self.mc.LD(reg, l.addr(-1*WORD, r.SP)) + self.mc.LD(reg, l.addr(pos-1*WORD, r.SP)) # replace b1_location with BEQ(here) pmc = OverwritingBuilder(self.mc, b1_location, 1) pmc.BRCL(c.EQ, l.imm(self.mc.currpos() - b1_location)) pmc.overwrite() - self.mc.LMG(r.r8, r.r13, l.addr(-7*WORD, r.SP)) + pos = STD_FRAME_SIZE_IN_BYTES - 7*WORD + self.mc.LMG(r.r8, r.r13, l.addr(pos, r.SP)) def write_real_errno(self, save_err): if save_err & rffi.RFFI_READSAVED_ERRNO: diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -198,7 +198,7 @@ function pointer, which means on big-endian that it is actually the address of a three-words descriptor. """ - self.BASR(r.RETURN, call_reg) + self.BASR(r.r14, call_reg) def reserve_cond_jump(self, short=False): self.trap() # conditional jump, patched later diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -530,11 +530,7 @@ mc.LGR(r.r0, loc_base) # unusual argument location mc.load_imm(r.r14, self.wb_slowpath[helper_num]) - # alloc a stack frame - mc.push_std_frame() mc.BASR(r.r14, r.r14) - # destory the frame - mc.pop_std_frame() if card_marking_mask: # The helper ends again with a check of the flag in the object. From pypy.commits at gmail.com Mon Jan 25 07:55:08 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Jan 2016 04:55:08 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed problem in build_propagate_exception_path, value was loaded into wrong register (that was overwritten after that immediatley) Message-ID: <56a61b2c.cf821c0a.e0c6.6ff7@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81929:2c1aa1edc33a Date: 2016-01-25 13:54 +0100 http://bitbucket.org/pypy/pypy/changeset/2c1aa1edc33a/ Log: fixed problem in build_propagate_exception_path, value was loaded into wrong register (that was overwritten after that immediatley) refactored some calls to use store_link/restore_link instead of manually specifing each STMG/LMG diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -178,6 +178,7 @@ # save the information mc.store_link() + mc.push_std_frame() RCS2 = r.r10 RCS3 = r.r12 @@ -231,12 +232,9 @@ func = rffi.cast(lltype.Signed, func) # Note: if not 'for_frame', argument_loc is r0, which must carefully # not be overwritten above - mc.STG(r.SP, l.addr(0, r.SP)) # store the backchain - mc.push_std_frame() mc.load_imm(mc.RAW_CALL_REG, func) mc.LGR(r.r2, argument_loc) mc.raw_call() - mc.pop_std_frame() if for_frame: self._restore_exception(mc, RCS2, RCS3) @@ -303,6 +301,9 @@ mc = InstrBuilder() self.mc = mc + mc.store_link() + mc.push_std_frame() + # signature of this _frame_realloc_slowpath function: # * on entry, r0 is the new size # * on entry, r1 is the gcmap @@ -314,7 +315,6 @@ self._push_core_regs_to_jitframe(mc, r.MANAGED_REGS) self._push_fp_regs_to_jitframe(mc) - self.mc.store_link() # First argument is SPP (= r31), which is the jitframe mc.LGR(r.r2, r.SPP) @@ -346,9 +346,10 @@ mc.load(r.r5, r.r5, diff) mc.store(r.r2, r.r5, -WORD) - mc.restore_link() self._pop_core_regs_from_jitframe(mc) self._pop_fp_regs_from_jitframe(mc) + + mc.restore_link() mc.BCR(c.ANY, r.RETURN) self._frame_realloc_slowpath = mc.materialize(self.cpu, []) @@ -367,7 +368,7 @@ ofs3 = self.cpu.get_ofs_of_frame_field('jf_guard_exc') ofs4 = self.cpu.get_ofs_of_frame_field('jf_descr') - self._store_and_reset_exception(self.mc, r.r3) + self._store_and_reset_exception(self.mc, r.r2) self.mc.load_imm(r.r3, propagate_exception_descr) self.mc.STG(r.r2, l.addr(ofs3, r.SPP)) self.mc.STG(r.r3, l.addr(ofs4, r.SPP)) @@ -392,7 +393,7 @@ self.mc = mc ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') mc.STG(r.SCRATCH2, l.addr(ofs2,r.SPP)) - mc.STMG(r.r14,r.r15,l.addr(14*WORD, r.SP)) + mc.store_link() mc.push_std_frame() # copy registers to the frame, with the exception of r3 to r6 and r12, @@ -422,8 +423,7 @@ self._pop_core_regs_from_jitframe(mc, saved_regs) if supports_floats: self._pop_fp_regs_from_jitframe(mc) - size = STD_FRAME_SIZE_IN_BYTES - mc.LMG(r.r14, r.r15, l.addr(size+14*WORD, r.SP)) + mc.restore_link() mc.BCR(c.ANY, r.RETURN) self.mc = None return mc.materialize(self.cpu, []) @@ -445,15 +445,16 @@ assert kind in ['fixed', 'str', 'unicode', 'var'] mc = InstrBuilder() self.mc = mc + # alloc a frame for the callee + mc.store_link() + mc.push_std_frame() + # ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) saved_regs = [reg for reg in r.MANAGED_REGS if reg is not r.RES and reg is not r.RSZ] self._push_core_regs_to_jitframe(mc, saved_regs) self._push_fp_regs_to_jitframe(mc) - # alloc a frame for the callee - mc.STMG(r.r14, r.r15, l.addr(14*WORD, r.SP)) - mc.push_std_frame() # if kind == 'fixed': addr = self.cpu.gc_ll_descr.get_malloc_slowpath_addr() @@ -505,8 +506,7 @@ # r.RSZ is loaded from [SCRATCH], to make the caller's store a no-op here mc.load(r.RSZ, r.r1, 0) # - size = STD_FRAME_SIZE_IN_BYTES - mc.LMG(r.r14, r.r15, l.addr(size+14*WORD, r.SP)) + mc.restore_link() mc.BCR(c.ANY, r.r14) self.mc = None return mc.materialize(self.cpu, []) @@ -523,7 +523,7 @@ mc = InstrBuilder() # # store the link backwards - mc.STMG(r.r14, r.r15, l.addr(14*WORD, r.SP)) + mc.store_link() mc.push_std_frame() mc.LGR(r.r2, r.SP) @@ -537,8 +537,7 @@ # else we have an exception mc.cmp_op(r.SCRATCH, l.imm(0), imm=True) # - size = STD_FRAME_SIZE_IN_BYTES - mc.LMG(r.r14, r.r15, l.addr(size+14*WORD, r.SP)) # restore the link + mc.restore_link() # So we return to our caller, conditionally if "EQ" mc.BCR(c.EQ, r.r14) mc.trap() # debug if this is EVER executed! diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -212,10 +212,11 @@ self.BASR(r.r14, r.r14) def store_link(self): - self.STG(r.RETURN, l.addr(14*WORD, r.SP)) + self.STMG(r.r14, r.r15, l.addr(14*WORD, r.SP)) def restore_link(self): - self.LG(r.RETURN, l.addr(14*WORD, r.SP)) + off = STD_FRAME_SIZE_IN_BYTES + self.LMG(r.r14, l.addr(off+14*WORD, r.SP)) def push_std_frame(self, additional_bytes=0): self.STG(r.SP, l.addr(-(STD_FRAME_SIZE_IN_BYTES + additional_bytes), r.SP)) From pypy.commits at gmail.com Mon Jan 25 08:03:26 2016 From: pypy.commits at gmail.com (Raemi) Date: Mon, 25 Jan 2016 05:03:26 -0800 (PST) Subject: [pypy-commit] pypy stmgc-c8: remove dummy allocation Message-ID: <56a61d1e.c177c20a.b064e.3f15@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r81930:89984a5ade1e Date: 2016-01-25 10:51 +0100 http://bitbucket.org/pypy/pypy/changeset/89984a5ade1e/ Log: remove dummy allocation AFAICS, we introduced this in C7 to make stop-the-world commits as fast as possible. It is only needed for major GCs in C8, so let's not pay the overhead. diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -137,16 +137,20 @@ self.handle_getfields(op) def possibly_add_dummy_allocation(self): - if not self.does_any_allocation: - # do a fake allocation since this is needed to check - # for requested safe-points: - self.does_any_allocation = True + # was necessary in C7 for others to commit, but in C8 it is only + # necessary for requesting major GCs. I think we better avoid this + # overhead for tight loops and wait a bit longer in that case. + pass + # if not self.does_any_allocation: + # # do a fake allocation since this is needed to check + # # for requested safe-points: + # self.does_any_allocation = True - # minimum size for the slowpath of MALLOC_NURSERY: - size = self.gc_ll_descr.minimal_size_in_nursery - op = ResOperation(rop.LABEL, []) # temp, will be replaced by gen_malloc_nursery - assert self._op_malloc_nursery is None # no ongoing allocation - self.gen_malloc_nursery(size, op) + # # minimum size for the slowpath of MALLOC_NURSERY: + # size = self.gc_ll_descr.minimal_size_in_nursery + # op = ResOperation(rop.LABEL, []) # temp, will be replaced by gen_malloc_nursery + # assert self._op_malloc_nursery is None # no ongoing allocation + # self.gen_malloc_nursery(size, op) def must_apply_write_barrier(self, val, v): # also apply for non-ref values From pypy.commits at gmail.com Mon Jan 25 08:03:28 2016 From: pypy.commits at gmail.com (Raemi) Date: Mon, 25 Jan 2016 05:03:28 -0800 (PST) Subject: [pypy-commit] pypy stmgc-c8: possible improvement Message-ID: <56a61d20.c8921c0a.886c4.67ad@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r81931:e9d25e9cc525 Date: 2016-01-25 11:56 +0100 http://bitbucket.org/pypy/pypy/changeset/e9d25e9cc525/ Log: possible improvement AFAICS at the level where the readbarrier transform gets executed, the operations listed in MALLOCS are already resolved. However, I failed to test this so far. diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -8,6 +8,7 @@ 'malloc', 'malloc_varsize', 'malloc_nonmovable', 'malloc_nonmovable_varsize', 'malloc_noconflict', 'malloc_noconflict_varsize', + 'stm_allocate_tid', ]) READ_OPS = set(['getfield', 'getarrayitem', 'getinteriorfield', 'raw_load']) diff --git a/rpython/translator/stm/test/test_readbarrier.py b/rpython/translator/stm/test/test_readbarrier.py --- a/rpython/translator/stm/test/test_readbarrier.py +++ b/rpython/translator/stm/test/test_readbarrier.py @@ -246,6 +246,37 @@ assert len(self.read_barriers) == 5 + + +# class TestAfterGCTransform(BaseTestTransform): +# do_read_barrier = True +# do_gc_transform = True + +# def test_malloc_result_readable(self): +# from rpython.flowspace.model import summary +# X = lltype.GcStruct('X', ('foo', lltype.Signed)) +# # +# def nobreak_escape(x): +# x.foo = 7 +# return x +# nobreak_escape._dont_inline_ = True +# # +# def f1(n): +# x = lltype.malloc(X) +# t = x.foo +# nobreak_escape(x) +# return t + +# self.interpret(f1, [4], run=False) +# g = self.graph +# from rpython.translator.translator import graphof +# #ff = graphof(g, f1) +# #ff.show() +# assert summary(g)['stm_read'] == 0 + +# assert self.read_barriers == [x1] + + external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, _callable=lambda: None, random_effects_on_gcobjs=True, diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -23,6 +23,7 @@ do_read_barrier = False do_turn_inevitable = False do_jit_driver = False + do_gc_transform = False def build_state(self): self.read_barriers = [] @@ -36,7 +37,7 @@ return 'I' # allocated with immortal=True raise AssertionError("unknown category on %r" % (p,)) - def interpret(self, fn, args, gcremovetypeptr=False, run=True): + def interpret(self, fn, args, run=True): self.build_state() clear_tcache() interp, self.graph = get_interpreter(fn, args, view=False, @@ -45,14 +46,34 @@ interp.frame_class = LLSTMFrame # self.translator = interp.typer.annotator.translator - self.translator.config.translation.gcremovetypeptr = gcremovetypeptr + self.translator.config.translation.gc = "stmgc" + self.translator.config.translation.stm = True self.stmtransformer = STMTransformer(self.translator) if self.do_jit_driver: self.stmtransformer.transform_jit_driver() + if self.do_turn_inevitable: + self.stmtransformer.transform_turn_inevitable() + if self.do_gc_transform: + pass + # from rpython.translator.c.gc import StmFrameworkGcPolicy + # from rpython.translator.c.database import LowLevelDatabase + # from rpython.translator.backendopt.all import backend_optimizations + # from rpython.rtyper.lltypesystem.lltype import getfunctionptr + # self.translator.config.translation.backendopt.inline=True + # self.translator.config.translation.backendopt.inline_threshold=10000 + # self.translator.config.translation.backendopt.mallocs=True + # # backend_optimizations(self.translator, + # # inline_graph_from_anywhere=True, + # # secondary=True, inline=True, inline_threshold=0, + # # mallocs=True, print_statistics=True, + # # clever_malloc_removal=True) + # db = LowLevelDatabase(self.translator, gcpolicyclass=StmFrameworkGcPolicy) + # self.stmtransformer.transform_after_gc() + # list(db.gcpolicy.gc_startup_code()) + # db.get(getfunctionptr(self.graph)) + # db.complete() if self.do_read_barrier: self.stmtransformer.transform_read_barrier() - if self.do_turn_inevitable: - self.stmtransformer.transform_turn_inevitable() if option.view: self.translator.view() # From pypy.commits at gmail.com Mon Jan 25 08:05:32 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Jan 2016 05:05:32 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added missing parameter to LMG call (refactoring issues :) Message-ID: <56a61d9c.8205c20a.bf1e4.7fef@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81932:0e52bea859f2 Date: 2016-01-25 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/0e52bea859f2/ Log: added missing parameter to LMG call (refactoring issues :) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -216,7 +216,7 @@ def restore_link(self): off = STD_FRAME_SIZE_IN_BYTES - self.LMG(r.r14, l.addr(off+14*WORD, r.SP)) + self.LMG(r.r14, r.r15, l.addr(off+14*WORD, r.SP)) def push_std_frame(self, additional_bytes=0): self.STG(r.SP, l.addr(-(STD_FRAME_SIZE_IN_BYTES + additional_bytes), r.SP)) From pypy.commits at gmail.com Mon Jan 25 09:55:33 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Jan 2016 06:55:33 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: due to the refacotring the variable saving went to the wrong stack frame in wb_slowpath (now takes the right one) Message-ID: <56a63765.890bc30a.5c6f2.ffffb313@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81933:adce895c17e8 Date: 2016-01-25 15:54 +0100 http://bitbucket.org/pypy/pypy/changeset/adce895c17e8/ Log: due to the refacotring the variable saving went to the wrong stack frame in wb_slowpath (now takes the right one) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -199,9 +199,10 @@ # and two more non-volatile registers (used to store # the RPython exception that occurred in the CALL, if any). # - mc.STMG(r.r10, r.r12, l.addr(10*WORD, r.SP)) - mc.STG(r.r2, l.addr(2*WORD, r.SP)) - mc.STD(r.f0, l.addr(16*WORD, r.SP)) + off = STD_FRAME_SIZE_IN_BYTES + mc.STMG(r.r10, r.r12, l.addr(off+10*WORD, r.SP)) + mc.STG(r.r2, l.addr(off+2*WORD, r.SP)) + mc.STD(r.f0, l.addr(off+16*WORD, r.SP)) saved_regs = None saved_fp_regs = None else: @@ -248,9 +249,10 @@ mc.NILL(RCS2, l.imm(card_marking_mask & 0xFF)) if for_frame: - mc.LMG(r.r10, r.r12, l.addr(10*WORD, r.SP)) - mc.LG(r.r2, l.addr(2*WORD, r.SP)) - mc.LD(r.f0, l.addr(16*WORD, r.SP)) + off = STD_FRAME_SIZE_IN_BYTES + mc.LMG(r.r10, r.r12, l.addr(off+10*WORD, r.SP)) + mc.LG(r.r2, l.addr(off+2*WORD, r.SP)) + mc.LD(r.f0, l.addr(off+16*WORD, r.SP)) else: self._pop_core_regs_from_jitframe(mc, saved_regs) self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) @@ -396,7 +398,7 @@ mc.store_link() mc.push_std_frame() - # copy registers to the frame, with the exception of r3 to r6 and r12, + # copy registers to the frame, with the exception of r2 to r5 and r12, # because these have already been saved by the caller. Note that # this is not symmetrical: these 5 registers are saved by the caller # but restored here at the end of this function. @@ -450,7 +452,7 @@ mc.push_std_frame() # ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.STG(r.SCRATCH, l.addr(ofs2, r.SPP)) + mc.STG(r.r1, l.addr(ofs2, r.SPP)) saved_regs = [reg for reg in r.MANAGED_REGS if reg is not r.RES and reg is not r.RSZ] self._push_core_regs_to_jitframe(mc, saved_regs) @@ -572,7 +574,6 @@ jmp_pos = self.mc.currpos() self.mc.reserve_cond_jump() - mc.push_std_frame() mc.load_imm(r.r14, self.stack_check_slowpath) mc.BASR(r.r14, r.r14) @@ -1008,8 +1009,6 @@ # Build a new stackframe of size STD_FRAME_SIZE_IN_BYTES self.mc.STMG(r.r6, r.r15, l.addr(6*WORD, r.SP)) self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) - # save the back chain - self.mc.STG(r.SP, l.addr(0, r.SP)) # save r3, the second argument, to the thread local position self.mc.STG(r.r3, l.addr(THREADLOCAL_ON_ENTER_JIT, r.SP)) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -219,7 +219,7 @@ self.LMG(r.r14, r.r15, l.addr(off+14*WORD, r.SP)) def push_std_frame(self, additional_bytes=0): - self.STG(r.SP, l.addr(-(STD_FRAME_SIZE_IN_BYTES + additional_bytes), r.SP)) + self.STG(r.SP, l.addr(0, r.SP)) self.LAY(r.SP, l.addr(-(STD_FRAME_SIZE_IN_BYTES + additional_bytes), r.SP)) def pop_std_frame(self, additional_bytes=0): From pypy.commits at gmail.com Mon Jan 25 12:34:30 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Mon, 25 Jan 2016 09:34:30 -0800 (PST) Subject: [pypy-commit] pypy default: Make C-API char* parameters const-correct with respect to CPython. Message-ID: <56a65ca6.6608c20a.ee2ce.fffff519@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r81934:2b601dbb5d24 Date: 2016-01-25 09:19 -0800 http://bitbucket.org/pypy/pypy/changeset/2b601dbb5d24/ Log: Make C-API char* parameters const-correct with respect to CPython. diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -59,7 +59,7 @@ return None return borrow_from(w_dict, w_res) - at cpython_api([PyObject, rffi.CCHARP], rffi.INT_real, error=-1) + at cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) def PyDict_DelItemString(space, w_dict, key_ptr): """Remove the entry in dictionary p which has a key specified by the string key. Return 0 on success or -1 on failure.""" diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -128,7 +128,7 @@ filename = "" return run_string(space, source, filename, start, w_globals, w_locals) - at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, + at cpython_api([CONST_STRING, rffi.INT_real, PyObject, PyObject, PyCompilerFlagsPtr], PyObject) def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr): """Execute Python source code from str in the context specified by the @@ -189,7 +189,7 @@ pi[0] = space.getindex_w(w_obj, None) return 1 - at cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr], + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real, PyCompilerFlagsPtr], PyObject) def Py_CompileStringFlags(space, source, filename, start, flagsptr): """Parse and compile the Python source code in str, returning the diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -1,6 +1,6 @@ import errno from pypy.interpreter.error import OperationError -from pypy.module.cpyext.api import cpython_api +from pypy.module.cpyext.api import cpython_api, CONST_STRING from pypy.module.cpyext.pyobject import PyObject from rpython.rlib import rdtoa from rpython.rlib import rfloat @@ -22,7 +22,7 @@ rfloat.DIST_NAN: Py_DTST_NAN } - at cpython_api([rffi.CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) + at cpython_api([CONST_STRING, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) @jit.dont_look_inside # direct use of _get_errno() def PyOS_string_to_double(space, s, endptr, w_overflow_exception): """Convert a string s to a double, raising a Python From pypy.commits at gmail.com Mon Jan 25 15:15:03 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Jan 2016 12:15:03 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: add passing test for multiple inheritance Message-ID: <56a68247.4e8e1c0a.e32dd.2412@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81935:df9304236c27 Date: 2016-01-25 21:58 +0200 http://bitbucket.org/pypy/pypy/changeset/df9304236c27/ Log: add passing test for multiple inheritance diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -371,6 +371,12 @@ 0 /*tp_weaklist*/ }; +PyTypeObject UnicodeSubtype3 = { + PyObject_HEAD_INIT(NULL) + 0, + "foo.fuu3", + sizeof(UnicodeSubclassObject) +}; /* A Metatype */ @@ -777,6 +783,14 @@ CustomType.ob_type = &MetaType; if (PyType_Ready(&CustomType) < 0) return; + + UnicodeSubtype3.tp_flags = Py_TPFLAGS_DEFAULT; + UnicodeSubtype3.tp_base = &UnicodeSubtype; + UnicodeSubtype3.tp_bases = Py_BuildValue("(OO)", &UnicodeSubtype, + &CustomType); + if (PyType_Ready(&UnicodeSubtype3) < 0) + return; + m = Py_InitModule("foo", foo_functions); if (m == NULL) return; @@ -789,6 +803,8 @@ return; if (PyDict_SetItemString(d, "UnicodeSubtype2", (PyObject *) &UnicodeSubtype2) < 0) return; + if (PyDict_SetItemString(d, "UnicodeSubtype3", (PyObject *) &UnicodeSubtype3) < 0) + return; if (PyDict_SetItemString(d, "MetaType", (PyObject *) &MetaType) < 0) return; if (PyDict_SetItemString(d, "InitErrType", (PyObject *) &InitErrType) < 0) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -150,6 +150,14 @@ assert fuu2(u"abc").baz().escape() raises(TypeError, module.fooType.object_member.__get__, 1) + def test_multiple_inheritance(self): + module = self.import_module(name='foo') + obj = module.UnicodeSubtype(u'xyz') + obj2 = module.UnicodeSubtype2() + obj3 = module.UnicodeSubtype3() + assert obj3.get_val() == 42 + assert len(type(obj3).mro()) == 6 + def test_init(self): module = self.import_module(name="foo") newobj = module.UnicodeSubtype() @@ -416,15 +424,15 @@ module = self.import_extension('foo', [ ("test_tp_getattro", "METH_VARARGS", ''' - PyObject *obj = PyTuple_GET_ITEM(args, 0); - PyIntObject *value = PyTuple_GET_ITEM(args, 1); + PyObject *name, *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *attr, *value = PyTuple_GET_ITEM(args, 1); if (!obj->ob_type->tp_getattro) { PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); return NULL; } - PyObject *name = PyString_FromString("attr1"); - PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + name = PyString_FromString("attr1"); + attr = obj->ob_type->tp_getattro(obj, name); if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, @@ -705,13 +713,13 @@ static PyObject * intlike_nb_add(PyObject *self, PyObject *other) { - long val1 = ((IntLikeObject *)(self))->ival; + long val2, val1 = ((IntLikeObject *)(self))->ival; if (PyInt_Check(other)) { long val2 = PyInt_AsLong(other); return PyInt_FromLong(val1+val2); } - long val2 = ((IntLikeObject *)(other))->ival; + val2 = ((IntLikeObject *)(other))->ival; return PyInt_FromLong(val1+val2); } From pypy.commits at gmail.com Mon Jan 25 15:15:05 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Jan 2016 12:15:05 -0800 (PST) Subject: [pypy-commit] pypy default: fix - link_flags is a tuple not a list Message-ID: <56a68249.878e1c0a.43a8f.21fa@mx.google.com> Author: mattip Branch: Changeset: r81936:e8e0169d3a13 Date: 2016-01-25 22:00 +0200 http://bitbucket.org/pypy/pypy/changeset/e8e0169d3a13/ Log: fix - link_flags is a tuple not a list diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -151,7 +151,7 @@ # Increase stack size, for the linker and the stack check code. stack_size = 8 << 20 # 8 Mb - self.link_flags.append('/STACK:%d' % stack_size) + self.link_flags = self.link_flags + ('/STACK:%d' % stack_size,) # The following symbol is used in c/src/stack.h self.cflags.append('/DMAX_STACK_SIZE=%d' % (stack_size - 1024)) From pypy.commits at gmail.com Mon Jan 25 15:15:08 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Jan 2016 12:15:08 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: remove debug cruft Message-ID: <56a6824c.01cdc20a.2b98b.4484@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81938:d4fc8a14bff6 Date: 2016-01-18 20:46 +0200 http://bitbucket.org/pypy/pypy/changeset/d4fc8a14bff6/ Log: remove debug cruft diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -66,7 +66,6 @@ py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_length = len(space.unicode_w(w_obj)) py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO) - print w_obj py_unicode.c_hash = space.hash_w(w_obj) py_unicode.c_defenc = lltype.nullptr(PyObject.TO) From pypy.commits at gmail.com Mon Jan 25 15:15:07 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Jan 2016 12:15:07 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: test, fix - raise cpython compatible exception Message-ID: <56a6824b.624fc20a.f6238.2dbb@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81937:26b7a0e96dc1 Date: 2016-01-17 08:17 +0200 http://bitbucket.org/pypy/pypy/changeset/26b7a0e96dc1/ Log: test, fix - raise cpython compatible exception diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -276,7 +276,12 @@ @cpython_api([PyObject], PyCFunction_typedef) def PyCFunction_GetFunction(space, w_obj): - cfunction = space.interp_w(W_PyCFunctionObject, w_obj) + try: + cfunction = space.interp_w(W_PyCFunctionObject, w_obj) + except OperationError, e: + if e.match(space, space.w_TypeError): + raise oefmt(space.w_SystemError, "bad argument to internal function") + raise return cfunction.ml.c_ml_meth @cpython_api([PyObject], PyObject) diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -90,7 +90,7 @@ assert mod.isCFunction(mod.getarg_O) == "getarg_O" assert mod.getModule(mod.getarg_O) == 'MyModule' assert mod.isSameFunction(mod.getarg_O) - raises(TypeError, mod.isSameFunction, 1) + raises(SystemError, mod.isSameFunction, 1) class TestPyCMethodObject(BaseApiTest): def test_repr(self, space, api): From pypy.commits at gmail.com Mon Jan 25 15:15:11 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 25 Jan 2016 12:15:11 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <56a6824f.41dfc20a.99dea.2ff8@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81939:98e1616b19f0 Date: 2016-01-25 22:07 +0200 http://bitbucket.org/pypy/pypy/changeset/98e1616b19f0/ Log: merge default into branch diff too long, truncating to 2000 out of 9862 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -39,5 +39,5 @@ # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html -cffi_imports: +cffi_imports: pypy-c PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -685,13 +685,17 @@ # the previous version of this code did. This should work for # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - so_ext = _get_c_extension_suffix() + # ".pypy-VERSION.so" instead. Note a further tweak for cffi's + # embedding mode: if EXT_SUFFIX is also defined, use that + # directly. + so_ext = get_config_var('EXT_SUFFIX') if so_ext is None: - so_ext = get_config_var('SO') # fall-back - # extensions in debug_mode are named 'module_d.pyd' under windows - if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back + # extensions in debug_mode are named 'module_d.pyd' under windows + if os.name == 'nt' and self.debug: + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff --git a/lib-python/2.7/test/capath/0e4015b9.0 b/lib-python/2.7/test/capath/0e4015b9.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/0e4015b9.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/capath/ce7b8643.0 b/lib-python/2.7/test/capath/ce7b8643.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/ce7b8643.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/https_svn_python_org_root.pem b/lib-python/2.7/test/https_svn_python_org_root.pem deleted file mode 100644 --- a/lib-python/2.7/test/https_svn_python_org_root.pem +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO -BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T -SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD ------END CERTIFICATE----- diff --git a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem --- a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem +++ b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG @@ -8,9 +8,9 @@ aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97 -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9 +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -57,7 +57,8 @@ SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") +REMOTE_HOST = "self-signed.pythontest.net" +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") @@ -244,7 +245,7 @@ self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): - with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) @@ -792,7 +793,7 @@ # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"): - ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) + ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) @@ -1013,7 +1014,7 @@ ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) @@ -1023,8 +1024,8 @@ # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) - # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + # but CAFILE_CACERT is a CA cert + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), @@ -1040,7 +1041,7 @@ (('emailAddress', 'support at cacert.org'),)), 'version': 3}]) - with open(SVN_PYTHON_ORG_ROOT_CERT) as f: + with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) @@ -1215,11 +1216,11 @@ class NetworkedTests(unittest.TestCase): def test_connect(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() @@ -1228,27 +1229,27 @@ s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) + self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443))) self.assertTrue(s.getpeercert()) finally: s.close() @@ -1256,14 +1257,14 @@ def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish @@ -1285,58 +1286,62 @@ def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: - self.skipTest("svn.python.org responded too quickly") + self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - rc = s.connect_ex(("svn.python.org", 444)) + rc = s.connect_ex((REMOTE_HOST, 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. - self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) + errors = ( + errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, + errno.EWOULDBLOCK, + ) + self.assertIn(rc, errors) finally: s.close() def test_connect_with_context(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), - server_hostname="svn.python.org") - s.connect(("svn.python.org", 443)) + server_hostname=REMOTE_HOST) + s.connect((REMOTE_HOST, 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # This should succeed because we specify the root cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(REMOTE_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1349,12 +1354,12 @@ # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1365,7 +1370,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1373,15 +1378,15 @@ s.close() def test_connect_cadata(self): - with open(CAFILE_CACERT) as f: + with open(REMOTE_ROOT_CERT) as f: pem = f.read().decode('ascii') der = ssl.PEM_cert_to_DER_cert(pem) - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1390,7 +1395,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1399,9 +1404,9 @@ # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) fd = ss.fileno() f = ss.makefile() f.close() @@ -1415,9 +1420,9 @@ self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = socket.socket(socket.AF_INET) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, @@ -1460,12 +1465,12 @@ if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) - _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) + _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): - remote = ("svn.python.org", 443) + remote = (REMOTE_HOST, 443) with support.transient_internet(remote[0]): with closing(ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s: @@ -1510,13 +1515,13 @@ def test_get_ca_certs_capath(self): # capath certs are loaded on request - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1527,12 +1532,12 @@ @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with closing(ctx1.wrap_socket(s)) as ss: - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 @@ -3026,7 +3031,7 @@ pass for filename in [ - CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, + CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py --- a/lib_pypy/cPickle.py +++ b/lib_pypy/cPickle.py @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': @@ -559,6 +563,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. >>> decode_long('') 0L @@ -592,6 +597,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.2 +Version: 1.5.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.5.0" +__version_info__ = (1, 5, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,8 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 #define _cffi_call_python \ - ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -206,7 +207,8 @@ /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org #endif diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -74,6 +74,7 @@ self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -101,13 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() - self._parser.parse(csource, override=override, packed=packed) + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -533,6 +542,31 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + if '__pypy__' in sys.builtin_module_names: + if hasattr(sys, 'prefix'): + import os + libdir = os.path.join(sys.prefix, 'bin') + dirs = kwds.setdefault('library_dirs', []) + if libdir not in dirs: + dirs.append(libdir) + pythonlib = "pypy-c" + else: + if sys.platform == "win32": + template = "python%d%d" + if sys.flags.debug: + template = template + '_d' + else: + template = "python%d.%d" + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + libraries = kwds.setdefault('libraries', []) + if pythonlib not in libraries: + libraries.append(pythonlib) + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " @@ -592,14 +626,23 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0): + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. + """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, + target=target, source_extension=source_extension, compiler_verbose=verbose, **kwds) def init_once(self, func, tag): @@ -626,6 +669,32 @@ self._init_once_cache[tag] = (True, result) return result + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -220,8 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._override = False - self._packed = False + self._options = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -281,16 +280,15 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): - prev_override = self._override - prev_packed = self._packed + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options try: - self._override = override - self._packed = packed + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} self._internal_parse(csource) finally: - self._override = prev_override - self._packed = prev_packed + self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -376,10 +374,13 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._inside_extern_python: - self._declare('extern_python ' + decl.name, tp) + if self._options['dllexport']: + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' else: - self._declare('function ' + decl.name, tp) + tag = 'function ' + self._declare(tag + decl.name, tp) def _parse_decl(self, decl): node = decl.type @@ -449,7 +450,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._override: + if not self._options['override']: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -728,7 +729,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._packed + tp.packed = self._options['packed'] if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,14 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, + target_extension, embedding) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +38,32 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _save_val(name): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + return config_vars.get(name, Ellipsis) + +def _restore_val(name, value): + import distutils.sysconfig + config_vars = distutils.sysconfig.get_config_vars() + config_vars[name] = value + if value is Ellipsis: + del config_vars[name] + +def _win32_hack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + if not hasattr(MSVCCompiler, '_remove_visual_c_ref_CFFI_BAK'): + MSVCCompiler._remove_visual_c_ref_CFFI_BAK = \ + MSVCCompiler._remove_visual_c_ref + MSVCCompiler._remove_visual_c_ref = lambda self,manifest_file: manifest_file + +def _win32_unhack_for_embedding(): + from distutils.msvc9compiler import MSVCCompiler + MSVCCompiler._remove_visual_c_ref = \ + MSVCCompiler._remove_visual_c_ref_CFFI_BAK + +def _build(tmpdir, ext, compiler_verbose=0, target_extension=None, + embedding=False): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -49,18 +76,29 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: + if sys.platform == 'win32' and embedding: + _win32_hack_for_embedding() old_level = distutils.log.set_threshold(0) or 0 + old_SO = _save_val('SO') + old_EXT_SUFFIX = _save_val('EXT_SUFFIX') try: + if target_extension is not None: + _restore_val('SO', target_extension) + _restore_val('EXT_SUFFIX', target_extension) distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) + _restore_val('SO', old_SO) + _restore_val('EXT_SUFFIX', old_EXT_SUFFIX) + if sys.platform == 'win32' and embedding: + _win32_unhack_for_embedding() except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -281,6 +282,29 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % + (self._string_literal(self.ffi._embedding),)) + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION + # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() @@ -365,17 +389,16 @@ prnt() # # the init function - base_module_name = self.module_name.split('.')[-1] prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') if self._num_externpy: prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python = ' + prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') - prnt(' p[0] = (const void *)%s;' % VERSION) + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -394,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') @@ -1123,7 +1146,10 @@ assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) - def _generate_cpy_extern_python_decl(self, tp, name): + def _generate_cpy_dllexport_python_collecttype(self, tp, name): + self._generate_cpy_extern_python_collecttype(tp, name) + + def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' @@ -1156,7 +1182,11 @@ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) - prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + if dllexport: + tag = 'CFFI_DLLEXPORT' + else: + tag = 'static' + prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') @@ -1174,6 +1204,9 @@ prnt() self._num_externpy += 1 + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: raise ffiplatform.VerificationError( @@ -1185,6 +1218,21 @@ self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + def _generate_cpy_dllexport_python_ctx(self, tp, name): + self._generate_cpy_extern_python_ctx(tp, name) + + def _string_literal(self, s): + def _char_repr(c): + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' + if c in '\\"?': return '\\' + c + if ' ' <= c < '\x7F': return c + if c == '\n': return '\\n' + return '\\%03o' % ord(c) + lines = [] + for line in s.splitlines(True): + lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) + return ' \\\n'.join(lines) + # ---------- # emitting the opcodes for individual types @@ -1311,12 +1359,15 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, **kwds): + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) @@ -1325,13 +1376,40 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + if target == '*': + target_module_name = module_name + target_extension = None # use default + else: + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + else: + target += '.so' + # split along the first '.' (not the last one, otherwise the + # preceeding dots are interpreted as splitting package names) + index = target.find('.') + if index < 0: + raise ValueError("target argument %r should be a file name " + "containing a '.'" % (target,)) + target_module_name = target[:index] + target_extension = target[index:] + # + ext = ffiplatform.get_extension(ext_c_file, target_module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose, + target_extension, + embedding=embedding) finally: os.chdir(cwd) return outputfilename diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -10,6 +10,15 @@ with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in recent versions of PyPy. +.. note:: + + The interface described in this page is kept for backward compatibility. + From PyPy 4.1, it is recommended to use instead CFFI's `native embedding + support,`__ which gives a simpler approach that works on CPython as well + as PyPy. + +.. __: http://cffi.readthedocs.org/en/latest/embedding.html + The resulting shared library exports very few functions, however they are enough to accomplish everything you need, provided you follow a few principles. The API is: diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -19,7 +19,9 @@ * Clone this new repo (i.e. the fork) to your local machine with the command ``hg clone ssh://hg at bitbucket.org/yourname/pypy``. It is a very slow - operation but only ever needs to be done once. If you already cloned + operation but only ever needs to be done once. See also + http://pypy.org/download.html#building-from-source . + If you already cloned ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, then you can reuse the same clone by editing the file ``.hg/hgrc`` in your clone to contain the line ``default = diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -67,8 +67,8 @@ **module** directory contains extension modules written in RPython * **rpython compiler** that resides in ``rpython/annotator`` and - ``rpython/rtyper`` directories. Consult :doc:`introduction to RPython ` for - further reading + ``rpython/rtyper`` directories. Consult `Getting Started with RPython`_ + for further reading * **JIT generator** lives in ``rpython/jit`` directory. optimizations live in ``rpython/jit/metainterp/optimizeopt``, the main JIT in @@ -80,3 +80,14 @@ The rest of directories serve specific niche goal and are unlikely a good entry point. + + +More documentation +------------------ + +* `Getting Started Developing With PyPy`_ + +* `Getting Started with RPython`_ + +.. _`Getting Started Developing With PyPy`: getting-started-dev.html +.. _`Getting started with RPython`: http://rpython.readthedocs.org/en/latest/getting-started.html diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,8 @@ .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 +Fixed ``_PyLong_FromByteArray()``, which was buggy. + .. branch: numpy-1.10 Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy @@ -101,3 +103,33 @@ Fix the cryptic exception message when attempting to use extended slicing in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch: fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch + +.. branch: remove-getfield-pure + +Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant +optimizations instead consult the field descriptor to determine the purity of +the operation. Additionally, pure ``getfield`` operations are now handled +entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than +`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen +for traces containing a large number of pure getfield operations. + diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -84,13 +84,6 @@ from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype - w_pathsetter = space.appexec([], """(): - def f(path): - import sys - sys.path[:] = path - return f - """) - @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -109,7 +102,10 @@ " not found in '%s' or in any parent directory" % home1) return rffi.cast(rffi.INT, 1) space.startup() - space.call_function(w_pathsetter, w_path) + space.appexec([w_path], """(path): + import sys + sys.path[:] = path + """) # import site try: space.setattr(space.getbuiltinmodule('sys'), @@ -149,6 +145,9 @@ return os_thread.setup_threads(space) os_thread.bootstrapper.acquire(space, None, None) + # XXX this doesn't really work. Don't use os.fork(), and + # if your embedder program uses fork(), don't use any PyPy + # code in the fork rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py --- a/pypy/interpreter/eval.py +++ b/pypy/interpreter/eval.py @@ -9,8 +9,8 @@ class Code(W_Root): """A code is a compiled version of some source code. Abstract base class.""" - _immutable_ = True hidden_applevel = False + _immutable_fields_ = ['co_name', 'fast_natural_arity', 'hidden_applevel'] # n >= 0 : arity # FLATPYCALL = 0x100 diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -56,11 +56,13 @@ class PyCode(eval.Code): "CPython-style code objects." - _immutable_ = True - _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", - "_args_as_cellvars[*]"] - + _immutable_fields_ = ["_signature", "co_argcount", "co_cellvars[*]", + "co_code", "co_consts_w[*]", "co_filename", + "co_firstlineno", "co_flags", "co_freevars[*]", + "co_lnotab", "co_names_w[*]", "co_nlocals", + "co_stacksize", "co_varnames[*]", + "_args_as_cellvars[*]", "w_globals?"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -84,6 +86,10 @@ self.co_name = name self.co_firstlineno = firstlineno self.co_lnotab = lnotab + # store the first globals object that the code object is run in in + # here. if a frame is run in that globals object, it does not need to + # store it at all + self.w_globals = None self.hidden_applevel = hidden_applevel self.magic = magic self._signature = cpython_code_signature(self) @@ -91,6 +97,14 @@ self._init_ready() self.new_code_hook() + def frame_stores_global(self, w_globals): + if self.w_globals is None: + self.w_globals = w_globals + return False + if self.w_globals is w_globals: + return False + return True + def new_code_hook(self): code_hook = self.space.fromcache(CodeHookCache)._code_hook if code_hook is not None: diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -36,6 +36,7 @@ def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno + self.w_globals = pycode.w_globals class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -67,7 +68,6 @@ escaped = False # see mark_as_escaped() debugdata = None - w_globals = None pycode = None # code object executed by that frame locals_cells_stack_w = None # the list of all locals, cells and the valuestack valuestackdepth = 0 # number of items on valuestack @@ -90,8 +90,9 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.space = space - self.w_globals = w_globals self.pycode = code + if code.frame_stores_global(w_globals): + self.getorcreatedebug().w_globals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize @@ -116,6 +117,12 @@ self.debugdata = FrameDebugData(self.pycode) return self.debugdata + def get_w_globals(self): + debugdata = self.getdebug() + if debugdata is not None: + return debugdata.w_globals + return jit.promote(self.pycode).w_globals + def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -201,8 +208,9 @@ if flags & pycode.CO_NEWLOCALS: self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: - assert self.w_globals is not None - self.getorcreatedebug().w_locals = self.w_globals + w_globals = self.get_w_globals() + assert w_globals is not None + self.getorcreatedebug().w_locals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -449,7 +457,7 @@ w_blockstack, w_exc_value, # last_exception w_tb, # - self.w_globals, + self.get_w_globals(), w(self.last_instr), w(self.frame_finished_execution), w(f_lineno), @@ -658,6 +666,11 @@ def fget_getdictscope(self, space): return self.getdictscope() + def fget_w_globals(self, space): + # bit silly, but GetSetProperty passes a space + return self.get_w_globals() + + ### line numbers ### def fget_f_lineno(self, space): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -837,7 +837,7 @@ w_bases = self.popvalue() w_name = self.popvalue() w_metaclass = find_metaclass(self.space, w_bases, - w_methodsdict, self.w_globals, + w_methodsdict, self.get_w_globals(), self.space.wrap(self.get_builtin())) w_newclass = self.space.call_function(w_metaclass, w_name, w_bases, w_methodsdict) @@ -881,14 +881,14 @@ def STORE_GLOBAL(self, nameindex, next_instr): varname = self.getname_u(nameindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_globals, varname, w_newvalue) + self.space.setitem_str(self.get_w_globals(), varname, w_newvalue) def DELETE_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - self.space.delitem(self.w_globals, w_varname) + self.space.delitem(self.get_w_globals(), w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.getorcreatedebug().w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.get_w_globals(): varname = self.getname_u(nameindex) w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -898,7 +898,7 @@ self.LOAD_GLOBAL(nameindex, next_instr) # fall-back def _load_global(self, varname): - w_value = self.space.finditem_str(self.w_globals, varname) + w_value = self.space.finditem_str(self.get_w_globals(), varname) if w_value is None: # not in the globals, now look in the built-ins w_value = self.get_builtin().getdictvalue(self.space, varname) @@ -1029,7 +1029,7 @@ if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) - w_globals = self.w_globals + w_globals = self.get_w_globals() if w_flag is None: w_obj = space.call_function(w_import, w_modulename, w_globals, w_locals, w_fromlist) @@ -1237,7 +1237,7 @@ w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments) self.pushvalue(self.space.wrap(fn)) @@ -1249,7 +1249,7 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -34,6 +34,7 @@ import sys f = sys._getframe() assert f.f_globals is globals() + raises(TypeError, "f.f_globals = globals()") def test_f_builtins(self): import sys, __builtin__ diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -772,7 +772,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), + f_globals = GetSetProperty(PyFrame.fget_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -93,7 +93,7 @@ if space.is_none(w_locals): w_locals = w_globals else: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): diff --git a/pypy/module/__builtin__/interp_inspect.py b/pypy/module/__builtin__/interp_inspect.py --- a/pypy/module/__builtin__/interp_inspect.py +++ b/pypy/module/__builtin__/interp_inspect.py @@ -2,7 +2,7 @@ def globals(space): "Return the dictionary containing the current scope's global variables." ec = space.getexecutioncontext() - return ec.gettopframe_nohidden().w_globals + return ec.gettopframe_nohidden().get_w_globals() def locals(space): """Return a dictionary containing the current scope's local variables. diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1084,7 +1084,7 @@ def is_strdict(space, w_class): from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) + return space.wrap(isinstance(w_d.get_strategy(), BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -89,6 +89,7 @@ 'set_code_callback' : 'interp_magic.set_code_callback', 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', + 'decode_long' : 'interp_magic.decode_long', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pycode import CodeHookCache from pypy.interpreter.pyframe import PyFrame @@ -93,7 +93,7 @@ Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = w_obj.strategy.__class__.__name__ + name = w_obj.get_strategy().__class__.__name__ elif isinstance(w_obj, W_ListObject): name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): @@ -158,4 +158,13 @@ if space.is_none(w_callable): cache._code_hook = None else: - cache._code_hook = w_callable \ No newline at end of file + cache._code_hook = w_callable + + at unwrap_spec(string=str, byteorder=str, signed=int) +def decode_long(space, string, byteorder='little', signed=1): + from rpython.rlib.rbigint import rbigint, InvalidEndiannessError + try: + result = rbigint.frombytes(string, byteorder, bool(signed)) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, "invalid byteorder argument") + return space.newlong_from_rbigint(result) diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -30,4 +30,20 @@ """ in d finally: __pypy__.set_code_callback(None) - assert d['f'].__code__ in l \ No newline at end of file + assert d['f'].__code__ in l + + def test_decode_long(self): + from __pypy__ import decode_long + assert decode_long('') == 0 + assert decode_long('\xff\x00') == 255 + assert decode_long('\xff\x7f') == 32767 + assert decode_long('\x00\xff') == -256 + assert decode_long('\x00\x80') == -32768 + assert decode_long('\x80') == -128 + assert decode_long('\x7f') == 127 + assert decode_long('\x55' * 97) == (1 << (97 * 8)) // 3 + assert decode_long('\x00\x80', 'big') == 128 + assert decode_long('\xff\x7f', 'little', False) == 32767 + assert decode_long('\x00\x80', 'little', False) == 32768 + assert decode_long('\x00\x80', 'little', True) == -32768 + raises(ValueError, decode_long, '', 'foo') diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,8 +1,9 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi +from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rtyper.lltypesystem import rffi -VERSION = "1.4.2" +VERSION = "1.5.0" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -65,6 +66,10 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def startup(self, space): + from pypy.module._cffi_backend import embedding + embedding.glob.space = space + def get_dict_rtld_constants(): found = {} @@ -78,3 +83,11 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value + + +# write this entrypoint() here, to make sure it is registered early enough + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') +def pypy_init_embedded_cffi_module(version, init_struct): + from pypy.module._cffi_backend import embedding + return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -2,24 +2,25 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module +from pypy.module import _cffi_backend from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject from pypy.module._cffi_backend.lib_obj import W_LibObject VERSION_MIN = 0x2601 -VERSION_MAX = 0x26FF +VERSION_MAX = 0x27FF VERSION_EXPORT = 0x0A03 -initfunctype = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) +INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) def load_cffi1_module(space, name, path, initptr): # This is called from pypy.module.cpyext.api.load_extension_module() from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python - initfunc = rffi.cast(initfunctype, initptr) + initfunc = rffi.cast(INITFUNCPTR, initptr) with lltype.scoped_alloc(rffi.VOIDPP.TO, 16, zero=True) as p: p[0] = rffi.cast(rffi.VOIDP, VERSION_EXPORT) p[1] = rffi.cast(rffi.VOIDP, get_ll_cffi_call_python()) @@ -27,8 +28,10 @@ version = rffi.cast(lltype.Signed, p[0]) if not (VERSION_MIN <= version <= VERSION_MAX): raise oefmt(space.w_ImportError, - "cffi extension module '%s' has unknown version %s", - name, hex(version)) + "cffi extension module '%s' uses an unknown version tag %s. " + "This module might need a more recent version of PyPy. " + "The current PyPy provides CFFI %s.", + name, hex(version), _cffi_backend.VERSION) src_ctx = rffi.cast(parse_c_type.PCTX, p[1]) ffi = W_FFIObject(space, src_ctx) @@ -38,7 +41,8 @@ w_name = space.wrap(name) module = Module(space, w_name) - module.setdictvalue(space, '__file__', space.wrap(path)) + if path is not None: + module.setdictvalue(space, '__file__', space.wrap(path)) module.setdictvalue(space, 'ffi', space.wrap(ffi)) module.setdictvalue(space, 'lib', space.wrap(lib)) w_modules_dict = space.sys.get('modules') diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cffi_backend/embedding.py @@ -0,0 +1,146 @@ +import os +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import OperationError, oefmt + +# ____________________________________________________________ + + +EMBED_VERSION_MIN = 0xB011 +EMBED_VERSION_MAX = 0xB0FF + +STDERR = 2 +INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', + ('name', rffi.CCHARP), + ('func', rffi.VOIDP), + ('code', rffi.CCHARP))) + +def load_embedded_cffi_module(space, version, init_struct): + from pypy.module._cffi_backend.cffi1_module import load_cffi1_module + declare_c_function() # translation-time hint only: + # declare _cffi_carefully_make_gil() + # + version = rffi.cast(lltype.Signed, version) + if not (EMBED_VERSION_MIN <= version <= EMBED_VERSION_MAX): + raise oefmt(space.w_ImportError, + "cffi embedded module has got unknown version tag %s", + hex(version)) + # + if space.config.objspace.usemodules.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + # + name = rffi.charp2str(init_struct.name) + load_cffi1_module(space, name, None, init_struct.func) + code = rffi.charp2str(init_struct.code) + compiler = space.createcompiler() + pycode = compiler.compile(code, "" % name, 'exec', 0) + w_globals = space.newdict(module=True) + space.setitem_str(w_globals, "__builtins__", space.wrap(space.builtin)) + pycode.exec_code(space, w_globals, w_globals) + + +class Global: + pass +glob = Global() + +def pypy_init_embedded_cffi_module(version, init_struct): + # called from __init__.py + name = "?" + try: + init_struct = rffi.cast(INITSTRUCTPTR, init_struct) + name = rffi.charp2str(init_struct.name) + # + space = glob.space + must_leave = False + try: + must_leave = space.threadlocals.try_enter_thread(space) + load_embedded_cffi_module(space, version, init_struct) + res = 0 + except OperationError, operr: + operr.write_unraisable(space, "initialization of '%s'" % name, + with_traceback=True) + space.appexec([], r"""(): + import sys + sys.stderr.write('pypy version: %s.%s.%s\n' % + sys.pypy_version_info[:3]) + sys.stderr.write('sys.path: %r\n' % (sys.path,)) + """) + res = -1 + if must_leave: + space.threadlocals.leave_thread(space) + except Exception, e: + # oups! last-level attempt to recover. + try: + os.write(STDERR, "From initialization of '") + os.write(STDERR, name) + os.write(STDERR, "':\n") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + res = -1 + return rffi.cast(rffi.INT, res) + +# ____________________________________________________________ + + +eci = ExternalCompilationInfo(separate_module_sources=[ +r""" +/* XXX Windows missing */ +#include +#include +#include + +RPY_EXPORTED void rpython_startup_code(void); +RPY_EXPORTED int pypy_setup_home(char *, int); + +static unsigned char _cffi_ready = 0; +static const char *volatile _cffi_module_name; + +static void _cffi_init_error(const char *msg, const char *extra) +{ + fprintf(stderr, + "\nPyPy initialization failure when loading module '%s':\n%s%s\n", + _cffi_module_name, msg, extra); +} + +static void _cffi_init(void) +{ + Dl_info info; + char *home; + + rpython_startup_code(); + RPyGilAllocate(); + + if (dladdr(&_cffi_init, &info) == 0) { + _cffi_init_error("dladdr() failed: ", dlerror()); + return; + } + home = realpath(info.dli_fname, NULL); + if (pypy_setup_home(home, 1) != 0) { + _cffi_init_error("pypy_setup_home() failed", ""); + return; + } + _cffi_ready = 1; +} + +RPY_EXPORTED +int pypy_carefully_make_gil(const char *name) +{ + /* For CFFI: this initializes the GIL and loads the home path. + It can be called completely concurrently from unrelated threads. + It assumes that we don't hold the GIL before (if it exists), and we + don't hold it afterwards. + */ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + _cffi_module_name = name; /* not really thread-safe, but better than + nothing */ + pthread_once(&once_control, _cffi_init); + return (int)_cffi_ready - 1; +} +"""]) + +declare_c_function = rffi.llexternal_use_eci(eci) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.5.0", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff --git a/pypy/module/_warnings/interp_warnings.py b/pypy/module/_warnings/interp_warnings.py --- a/pypy/module/_warnings/interp_warnings.py +++ b/pypy/module/_warnings/interp_warnings.py @@ -75,7 +75,7 @@ frame = ec.getnextframe_nohidden(frame) stacklevel -= 1 if frame: - w_globals = frame.w_globals + w_globals = frame.get_w_globals() lineno = frame.get_last_lineno() else: w_globals = space.sys.w_dict diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -59,7 +59,7 @@ return None return borrow_from(w_dict, w_res) - at cpython_api([PyObject, rffi.CCHARP], rffi.INT_real, error=-1) + at cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) def PyDict_DelItemString(space, w_dict, key_ptr): """Remove the entry in dictionary p which has a key specified by the string key. Return 0 on success or -1 on failure.""" diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -30,7 +30,7 @@ currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtins = space.getitem(w_globals, space.wrap('__builtins__')) if not space.isinstance_w(w_builtins, space.w_dict): w_builtins = w_builtins.getdict(space) @@ -54,7 +54,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.w_globals) + return borrow_from(None, caller.get_w_globals()) @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): @@ -128,7 +128,7 @@ filename = "" return run_string(space, source, filename, start, w_globals, w_locals) - at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, + at cpython_api([CONST_STRING, rffi.INT_real, PyObject, PyObject, PyCompilerFlagsPtr], PyObject) def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr): """Execute Python source code from str in the context specified by the @@ -189,7 +189,7 @@ pi[0] = space.getindex_w(w_obj, None) return 1 - at cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr], + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real, PyCompilerFlagsPtr], PyObject) def Py_CompileStringFlags(space, source, filename, start, flagsptr): """Parse and compile the Python source code in str, returning the diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -34,7 +34,7 @@ frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) - py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_globals = make_ref(space, frame.get_w_globals()) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @cpython_api([PyObject], lltype.Void, external=False) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -20,7 +20,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -228,26 +228,11 @@ def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - - result = rbigint() - negative = False - - for i in range(0, n): - if little_endian: - c = intmask(bytes[i]) - else: - c = intmask(bytes[n - i - 1]) - if i == 0 and signed and c & 0x80: - negative = True - if negative: - c = c ^ 0xFF - digit = rbigint.fromint(c) - - result = result.lshift(8) - result = result.add(digit) - - if negative: - result = result.neg() - + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, bytes), + rffi.cast(lltype.Signed, n)) + if little_endian: + byteorder = 'little' + else: + byteorder = 'big' + result = rbigint.frombytes(s, byteorder, signed != 0) return space.newlong_from_rbigint(result) - diff --git a/pypy/module/cpyext/Doc_stubgen_enable.patch b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch rename from pypy/module/cpyext/Doc_stubgen_enable.patch rename to pypy/module/cpyext/patches/Doc_stubgen_enable.patch diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -1,6 +1,7 @@ import errno from pypy.interpreter.error import OperationError -from pypy.module.cpyext.api import cpython_api, PyObject, CONST_STRING +from pypy.module.cpyext.api import cpython_api, CONST_STRING +from pypy.module.cpyext.pyobject import PyObject from rpython.rlib import rdtoa from rpython.rlib import rfloat from rpython.rlib import rposix, jit diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -175,10 +175,26 @@ little_endian, is_signed); """), ]) - assert module.from_bytearray(True, False) == 0x9ABC - assert module.from_bytearray(True, True) == -0x6543 - assert module.from_bytearray(False, False) == 0xBC9A - assert module.from_bytearray(False, True) == -0x4365 + assert module.from_bytearray(True, False) == 0xBC9A + assert module.from_bytearray(True, True) == -0x4366 + assert module.from_bytearray(False, False) == 0x9ABC From pypy.commits at gmail.com Mon Jan 25 17:12:58 2016 From: pypy.commits at gmail.com (Pierre-Yves David) Date: Mon, 25 Jan 2016 14:12:58 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Registering for the Winter Sprint Message-ID: <56a69dea.53ad1c0a.ff3b1.3e9f@mx.google.com> Author: Pierre-Yves David Branch: extradoc Changeset: r5596:4f73dcd80597 Date: 2016-01-22 09:47 -0800 http://bitbucket.org/pypy/extradoc/changeset/4f73dcd80597/ Log: Registering for the Winter Sprint I'm a Mercurial developer, interested in making progress on having pypy + Mercurial moving toward production readyness and discussing your usage of Mercurial as a project. diff --git a/sprintinfo/leysin-winter-2016/announcement.txt b/sprintinfo/leysin-winter-2016/announcement.txt --- a/sprintinfo/leysin-winter-2016/announcement.txt +++ b/sprintinfo/leysin-winter-2016/announcement.txt @@ -30,6 +30,10 @@ the conflict reporting tools, and more generally to figure out how practical it is in large projects to avoid conflicts +* Running Mercurial on Pypy + +* Workflow of the Pypy Project with Mercurial + * And as usual, the main side goal is to have fun in winter sports :-) We can take a day off for ski. diff --git a/sprintinfo/leysin-winter-2016/people.txt b/sprintinfo/leysin-winter-2016/people.txt --- a/sprintinfo/leysin-winter-2016/people.txt +++ b/sprintinfo/leysin-winter-2016/people.txt @@ -18,6 +18,7 @@ Richard Plangger 20-28 Ermina Maciej Fijalkowski 20-? Ermina (big room preferred) Ronan Lamy 20-27 Ermina? +Pierre-Yves David 20-27 Ermina ==================== ============== ======================= **NOTE:** we might have only a single double-bed room and a big room From pypy.commits at gmail.com Mon Jan 25 21:00:31 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 25 Jan 2016 18:00:31 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Close this head. Message-ID: <56a6d33f.d4811c0a.40e2e.2c58@mx.google.com> Author: Manuel Jacob Branch: py3.3 Changeset: r81941:8abc47287812 Date: 2016-01-26 02:51 +0100 http://bitbucket.org/pypy/pypy/changeset/8abc47287812/ Log: Close this head. Something went wrong with the last merge from py3k. I'm trying to fix this by redoing the merge and all subsequent commits. From pypy.commits at gmail.com Mon Jan 25 21:00:29 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 25 Jan 2016 18:00:29 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Add a hack to make functions linked in from C code inlinable. Message-ID: <56a6d33d.82e11c0a.12767.6c5a@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81940:697b7fe15e58 Date: 2016-01-16 01:34 +0100 http://bitbucket.org/pypy/pypy/changeset/697b7fe15e58/ Log: Add a hack to make functions linked in from C code inlinable. To make inlining these functions possible, their callers must (simplified) have the same target specified. This commits adds a hack to get the necessary attributes from clang. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -942,13 +942,14 @@ def write_graph(self, ptr_type, name, graph, export): prevent_inline, llvmgcroot = self.prepare_graph(ptr_type, name, graph) - self.w('define {linkage}{retvar.T} {name}({a}){add}{gc} {{'.format( + self.w('define {linkage}{retvar.T} {name}({a}) {attrs}{gc} {{'.format( linkage='' if export else 'internal ', retvar=get_repr(graph.getreturnvar()), name=name, a=', '.join(get_repr(arg).TV for arg in graph.getargs() if arg.concretetype is not lltype.Void), - add=' noinline' if prevent_inline else '', + attrs=database.target_attributes + + (' noinline' if prevent_inline else ''), gc=' gc "pypy"' if llvmgcroot else ''), '') @@ -1842,6 +1843,32 @@ self.gcpolicy.finish() + def _get_target_information(self): + """ + Get datalayout, triple, and "target-cpu" and "target-features" + attributes for a generic version of the CPU we are compiling on. + + This passes a small C code snippet to clang and parses the output in a + way that could easily break with any future LLVM version. + """ + output = cmdexec( + 'echo "void test() {{}}" | clang -x c - -emit-llvm -S -o -' + .format(devnull)) + for line in output.splitlines(True): + if line.startswith('target '): + yield line + if line.startswith('attributes'): + assert line.startswith('attributes #0 = { ') + assert line.endswith(' }\n') + attributes_str = line[len('attributes #0 = { '):-len(' }\n')] + for attribute in attributes_str.split(): + if attribute.startswith('"target-cpu"='): + target_cpu_attribute = attribute + if attribute.startswith('"target-features"='): + target_features_attribute = attribute + database.target_attributes = '{} {}'.format( + target_cpu_attribute, target_features_attribute) + def _write_special_declarations(self, f): compiler_info_str = "LLVM " + cmdexec('llvm-config --version').strip() cstr_type = '[{} x i8]'.format(len(compiler_info_str) + 1) @@ -1877,13 +1904,10 @@ self.work_dir.mkdir() self.main_ll_file = self.work_dir.join('main.ll') with self.main_ll_file.open('w') as f: - output = cmdexec('clang -emit-llvm -S -x c {} -o -' - .format(devnull)) - for line in output.splitlines(True): - if line.startswith('target '): - f.write(line) + database = Database(self, f) - database = Database(self, f) + for line in self._get_target_information(): + f.write(line) from rpython.translator.c.database import LowLevelDatabase from rpython.translator.c.genc import gen_threadlocal_structdef From pypy.commits at gmail.com Mon Jan 25 21:00:33 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 25 Jan 2016 18:00:33 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Remove trailing whitespace. Message-ID: <56a6d341.d7bc1c0a.c68bd.2a91@mx.google.com> Author: Manuel Jacob Branch: py3.3 Changeset: r81942:9b1940890dc0 Date: 2016-01-26 02:59 +0100 http://bitbucket.org/pypy/pypy/changeset/9b1940890dc0/ Log: Remove trailing whitespace. diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -183,7 +183,7 @@ assert dco.eof == True dco.flush() assert dco.eof == True - + def test_decompress_eof_incomplete_stream(self): import zlib x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo' From pypy.commits at gmail.com Tue Jan 26 02:42:58 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Jan 2016 23:42:58 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed longlong from s390x from cpu Message-ID: <56a72382.2968c20a.c229f.08b3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81943:47d8fe0b9435 Date: 2016-01-26 08:42 +0100 http://bitbucket.org/pypy/pypy/changeset/47d8fe0b9435/ Log: removed longlong from s390x from cpu diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -146,7 +146,7 @@ MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], MODEL_PPC_64: [], # we don't even have PPC directory, so no - MODEL_S390_64: ['floats', 'longlong'], + MODEL_S390_64: ['floats'], }[backend_name] if __name__ == '__main__': From pypy.commits at gmail.com Tue Jan 26 02:47:54 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 25 Jan 2016 23:47:54 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged default Message-ID: <56a724aa.c6e01c0a.df7b1.ffffd0b9@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81944:c6029ee6abcc Date: 2016-01-26 08:47 +0100 http://bitbucket.org/pypy/pypy/changeset/c6029ee6abcc/ Log: merged default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -124,6 +124,15 @@ Fix SSL tests by importing cpython's patch +.. branch: remove-getfield-pure + +Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant +optimizations instead consult the field descriptor to determine the purity of +the operation. Additionally, pure ``getfield`` operations are now handled +entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than +`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen +for traces containing a large number of pure getfield operations. + .. branch: memop-simplify3 Further simplifying the backend operations malloc_cond_varsize and zero_array. diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -59,7 +59,7 @@ return None return borrow_from(w_dict, w_res) - at cpython_api([PyObject, rffi.CCHARP], rffi.INT_real, error=-1) + at cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) def PyDict_DelItemString(space, w_dict, key_ptr): """Remove the entry in dictionary p which has a key specified by the string key. Return 0 on success or -1 on failure.""" diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -128,7 +128,7 @@ filename = "" return run_string(space, source, filename, start, w_globals, w_locals) - at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, + at cpython_api([CONST_STRING, rffi.INT_real, PyObject, PyObject, PyCompilerFlagsPtr], PyObject) def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr): """Execute Python source code from str in the context specified by the @@ -189,7 +189,7 @@ pi[0] = space.getindex_w(w_obj, None) return 1 - at cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr], + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real, PyCompilerFlagsPtr], PyObject) def Py_CompileStringFlags(space, source, filename, start, flagsptr): """Parse and compile the Python source code in str, returning the diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -1,6 +1,6 @@ import errno from pypy.interpreter.error import OperationError -from pypy.module.cpyext.api import cpython_api +from pypy.module.cpyext.api import cpython_api, CONST_STRING from pypy.module.cpyext.pyobject import PyObject from rpython.rlib import rdtoa from rpython.rlib import rfloat @@ -22,7 +22,7 @@ rfloat.DIST_NAN: Py_DTST_NAN } - at cpython_api([rffi.CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) + at cpython_api([CONST_STRING, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) @jit.dont_look_inside # direct use of _get_errno() def PyOS_string_to_double(space, s, endptr, w_overflow_exception): """Convert a string s to a double, raising a Python diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -83,9 +83,9 @@ p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() - p41 = getfield_gc_pure_r(p38, descr=) + p41 = getfield_gc_r(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) - i42 = getfield_gc_pure_i(p38, descr=) + i42 = getfield_gc_i(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -435,21 +435,21 @@ guard_isnull(p5, descr=...) guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) - i22 = getfield_gc_pure_i(p12, descr=) + i22 = getfield_gc_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() - p32 = getfield_gc_pure_r(p29, descr=) + p32 = getfield_gc_r(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc_pure_i(p29, descr=) + i34 = getfield_gc_i(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) p37 = getfield_gc_r(ConstPtr(ptr36), descr=) guard_nonnull_class(p37, ConstClass(W_IntObject), descr=...) - i39 = getfield_gc_pure_i(p37, descr=) + i39 = getfield_gc_i(p37, descr=) i40 = int_add_ovf(i22, i39) guard_no_overflow(descr=...) --TICK-- @@ -466,7 +466,7 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure_i(p6, descr=) + i8 = getfield_gc_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) guard_not_invalidated? diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -84,7 +84,7 @@ guard_no_exception(descr=...) p20 = new_with_vtable(descr=...) call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) @@ -93,7 +93,7 @@ p28 = getfield_gc_r(p13, descr=) p29 = getinteriorfield_gc_r(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure_i(p29, descr=) + i31 = getfield_gc_i(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -101,13 +101,13 @@ loop = log._filter(log.loops[0]) assert loop.match(""" guard_class(p1, #, descr=...) - p4 = getfield_gc_pure_r(p1, descr=) + p4 = getfield_gc_r(p1, descr=) i5 = getfield_gc_i(p0, descr=) - p6 = getfield_gc_pure_r(p4, descr=) - p7 = getfield_gc_pure_r(p6, descr=) + p6 = getfield_gc_r(p4, descr=) + p7 = getfield_gc_r(p6, descr=) guard_class(p7, ConstClass(Float64), descr=...) - i9 = getfield_gc_pure_i(p4, descr=) - i10 = getfield_gc_pure_i(p6, descr=) + i9 = getfield_gc_i(p4, descr=) + i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) i14 = int_eq(i10, 60) i15 = int_or(i12, i14) @@ -117,28 +117,28 @@ i18 = float_ne(f16, 0.000000) guard_true(i18, descr=...) guard_nonnull_class(p2, ConstClass(W_BoolBox), descr=...) - i20 = getfield_gc_pure_i(p2, descr=) + i20 = getfield_gc_i(p2, descr=) i21 = int_is_true(i20) guard_false(i21, descr=...) i22 = getfield_gc_i(p0, descr=) - i23 = getfield_gc_pure_i(p1, descr=) + i23 = getfield_gc_i(p1, descr=) guard_true(i23, descr=...) i25 = int_add(i22, 1) - p26 = getfield_gc_pure_r(p0, descr=) - i27 = getfield_gc_pure_i(p1, descr=) + p26 = getfield_gc_r(p0, descr=) + i27 = getfield_gc_i(p1, descr=) i28 = int_is_true(i27) guard_true(i28, descr=...) - i29 = getfield_gc_pure_i(p6, descr=) + i29 = getfield_gc_i(p6, descr=) guard_value(i29, 8, descr=...) i30 = int_add(i5, 8) - i31 = getfield_gc_pure_i(p1, descr=) + i31 = getfield_gc_i(p1, descr=) i32 = int_ge(i25, i31) guard_false(i32, descr=...) p34 = new_with_vtable(descr=...) {{{ - setfield_gc(p34, p1, descr=) + setfield_gc(p34, p1, descr=) setfield_gc(p34, i25, descr=) - setfield_gc(p34, p26, descr=) + setfield_gc(p34, p26, descr=) setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -54,7 +54,7 @@ i19 = int_add(i11, 1) setfield_gc(p2, i19, descr=...) guard_nonnull_class(p18, ConstClass(W_IntObject), descr=...) - i20 = getfield_gc_pure_i(p18, descr=...) + i20 = getfield_gc_i(p18, descr=...) i21 = int_gt(i20, i14) guard_true(i21, descr=...) jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -113,7 +113,7 @@ i12 = int_is_true(i4) guard_true(i12, descr=...) guard_not_invalidated(descr=...) - i10p = getfield_gc_pure_i(p10, descr=...) + i10p = getfield_gc_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -82,7 +82,7 @@ strsetitem(p25, 0, i23) p93 = call_r(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) - i95 = getfield_gc_pure_i(p93, descr=) + i95 = getfield_gc_i(p93, descr=) i96 = int_gt(i95, #) guard_false(i96, descr=...) i94 = call_i(ConstClass(rbigint._toint_helper), p93, descr=) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -152,7 +152,7 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) self.index = heaptracker.get_fielddescr_index_in(S, fieldname) - self._is_pure = S._immutable_field(fieldname) + self._is_pure = S._immutable_field(fieldname) != False def is_always_pure(self): return self._is_pure @@ -608,9 +608,6 @@ p = support.cast_arg(lltype.Ptr(descr.S), p) return support.cast_result(descr.FIELD, getattr(p, descr.fieldname)) - bh_getfield_gc_pure_i = bh_getfield_gc - bh_getfield_gc_pure_r = bh_getfield_gc - bh_getfield_gc_pure_f = bh_getfield_gc bh_getfield_gc_i = bh_getfield_gc bh_getfield_gc_r = bh_getfield_gc bh_getfield_gc_f = bh_getfield_gc diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -180,7 +180,8 @@ return self.offset def repr_of_descr(self): - return '' % (self.flag, self.name, self.offset) + ispure = " pure" if self._is_pure else "" + return '' % (self.flag, self.name, self.offset, ispure) def get_parent_descr(self): return self.parent_descr @@ -200,7 +201,7 @@ flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) index_in_parent = heaptracker.get_fielddescr_index_in(STRUCT, fieldname) - is_pure = bool(STRUCT._immutable_field(fieldname)) + is_pure = STRUCT._immutable_field(fieldname) != False fielddescr = FieldDescr(name, offset, size, flag, index_in_parent, is_pure) cachedict = cache.setdefault(STRUCT, {}) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -243,7 +243,6 @@ self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, fieldsize, itemsize, ofs) elif opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, - rop.GETFIELD_GC_PURE_I, rop.GETFIELD_GC_PURE_F, rop.GETFIELD_GC_PURE_R, rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) @@ -497,8 +496,8 @@ elif arraydescr.itemsize == 0: total_size = arraydescr.basesize elif (self.gc_ll_descr.can_use_nursery_malloc(1) and - self.gen_malloc_nursery_varsize(arraydescr.itemsize, v_length, - op, arraydescr, kind=kind)): + self.gen_malloc_nursery_varsize(arraydescr.itemsize, + v_length, op, arraydescr, kind=kind)): # note that we cannot initialize tid here, because the array # might end up being allocated by malloc_external or some # stuff that initializes GC header fields differently @@ -534,8 +533,6 @@ # See emit_pending_zeros(). (This optimization is done by # hacking the object 'o' in-place: e.g., o.getarg(1) may be # replaced with another constant greater than 0.) - #o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], - # descr=arraydescr) assert isinstance(arraydescr, ArrayDescr) scale = arraydescr.itemsize v_length_scaled = v_length diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1477,9 +1477,6 @@ genop_getfield_gc_f = _genop_getfield genop_getfield_raw_i = _genop_getfield genop_getfield_raw_f = _genop_getfield - genop_getfield_gc_pure_i = _genop_getfield - genop_getfield_gc_pure_r = _genop_getfield - genop_getfield_gc_pure_f = _genop_getfield def _genop_gc_load(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc, sign_loc = arglocs diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -168,9 +168,6 @@ elif (opnum != rop.GETFIELD_GC_R and opnum != rop.GETFIELD_GC_I and opnum != rop.GETFIELD_GC_F and - opnum != rop.GETFIELD_GC_PURE_R and - opnum != rop.GETFIELD_GC_PURE_I and - opnum != rop.GETFIELD_GC_PURE_F and opnum != rop.PTR_EQ and opnum != rop.PTR_NE and opnum != rop.INSTANCE_PTR_EQ and diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -816,9 +816,6 @@ if 'getfield_gc' in check: assert check.pop('getfield_gc') == 0 check['getfield_gc_i'] = check['getfield_gc_r'] = check['getfield_gc_f'] = 0 - if 'getfield_gc_pure' in check: - assert check.pop('getfield_gc_pure') == 0 - check['getfield_gc_pure_i'] = check['getfield_gc_pure_r'] = check['getfield_gc_pure_f'] = 0 if 'getarrayitem_gc_pure' in check: assert check.pop('getarrayitem_gc_pure') == 0 check['getarrayitem_gc_pure_i'] = check['getarrayitem_gc_pure_r'] = check['getarrayitem_gc_pure_f'] = 0 diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -183,6 +183,8 @@ return res def invalidate(self, descr): + if descr.is_always_pure(): + return for opinfo in self.cached_infos: assert isinstance(opinfo, info.AbstractStructPtrInfo) opinfo._fields[descr.get_index()] = None @@ -515,9 +517,14 @@ return pendingfields def optimize_GETFIELD_GC_I(self, op): + descr = op.getdescr() + if descr.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None: + resbox = self.optimizer.constant_fold(op) + self.optimizer.make_constant(op, resbox) + return structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) + cf = self.field_cache(descr) + field = cf.getfield_from_cache(self, structinfo, descr) if field is not None: self.make_equal_to(op, field) return @@ -525,23 +532,10 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) + structinfo.setfield(descr, op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I - def optimize_SETFIELD_GC(self, op): self.setfield(op) #opnum = OpHelpers.getfield_pure_for_descr(op.getdescr()) @@ -631,12 +625,12 @@ def optimize_QUASIIMMUT_FIELD(self, op): # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC_PURE(s, descr='inst_x') + # x = GETFIELD_GC(s, descr='inst_x') # pure # If 's' is a constant (after optimizations) we rely on the rest of the - # optimizations to constant-fold the following getfield_gc_pure. + # optimizations to constant-fold the following pure getfield_gc. # in addition, we record the dependency here to make invalidation work # correctly. - # NB: emitting the GETFIELD_GC_PURE is only safe because the + # NB: emitting the pure GETFIELD_GC is only safe because the # QUASIIMMUT_FIELD is also emitted to make sure the dependency is # registered. structvalue = self.ensure_ptr_info_arg0(op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -10,6 +10,7 @@ from rpython.jit.metainterp.typesystem import llhelper from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.debug import debug_print +from rpython.jit.metainterp.optimize import SpeculativeError @@ -374,6 +375,7 @@ if (box.type == 'i' and box.get_forwarded() and box.get_forwarded().is_constant()): return ConstInt(box.get_forwarded().getint()) + return None #self.ensure_imported(value) def get_newoperations(self): @@ -736,12 +738,64 @@ self.emit_operation(op) def constant_fold(self, op): + self.protect_speculative_operation(op) argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] return execute_nonspec_const(self.cpu, None, op.getopnum(), argboxes, op.getdescr(), op.type) + def protect_speculative_operation(self, op): + """When constant-folding a pure operation that reads memory from + a gcref, make sure that the gcref is non-null and of a valid type. + Otherwise, raise SpeculativeError. This should only occur when + unrolling and optimizing the unrolled loop. Note that if + cpu.supports_guard_gc_type is false, we can't really do this + check at all, but then we don't unroll in that case. + """ + opnum = op.getopnum() + cpu = self.cpu + + if OpHelpers.is_pure_getfield(opnum, op.getdescr()): + fielddescr = op.getdescr() + ref = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_field(ref, fielddescr) + return + + elif (opnum == rop.GETARRAYITEM_GC_PURE_I or + opnum == rop.GETARRAYITEM_GC_PURE_R or + opnum == rop.GETARRAYITEM_GC_PURE_F or + opnum == rop.ARRAYLEN_GC): + arraydescr = op.getdescr() + array = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_array(array, arraydescr) + if opnum == rop.ARRAYLEN_GC: + return + arraylength = cpu.bh_arraylen_gc(array, arraydescr) + + elif (opnum == rop.STRGETITEM or + opnum == rop.STRLEN): + string = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_string(string) + if opnum == rop.STRLEN: + return + arraylength = cpu.bh_strlen(string) + + elif (opnum == rop.UNICODEGETITEM or + opnum == rop.UNICODELEN): + unicode = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_unicode(unicode) + if opnum == rop.UNICODELEN: + return + arraylength = cpu.bh_unicodelen(unicode) + + else: + return + + index = self.get_constant_box(op.getarg(1)).getint() + if not (0 <= index < arraylength): + raise SpeculativeError + def is_virtual(self, op): if op.type == 'r': opinfo = self.getptrinfo(op) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -94,7 +94,6 @@ break else: # all constant arguments: constant-fold away - self.protect_speculative_operation(op) resbox = self.optimizer.constant_fold(op) # note that INT_xxx_OVF is not done from here, and the # overflows in the INT_xxx operations are ignored @@ -119,59 +118,6 @@ if nextop: self.emit_operation(nextop) - def protect_speculative_operation(self, op): - """When constant-folding a pure operation that reads memory from - a gcref, make sure that the gcref is non-null and of a valid type. - Otherwise, raise SpeculativeError. This should only occur when - unrolling and optimizing the unrolled loop. Note that if - cpu.supports_guard_gc_type is false, we can't really do this - check at all, but then we don't unroll in that case. - """ - opnum = op.getopnum() - cpu = self.optimizer.cpu - - if (opnum == rop.GETFIELD_GC_PURE_I or - opnum == rop.GETFIELD_GC_PURE_R or - opnum == rop.GETFIELD_GC_PURE_F): - fielddescr = op.getdescr() - ref = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_field(ref, fielddescr) - return - - elif (opnum == rop.GETARRAYITEM_GC_PURE_I or - opnum == rop.GETARRAYITEM_GC_PURE_R or - opnum == rop.GETARRAYITEM_GC_PURE_F or - opnum == rop.ARRAYLEN_GC): - arraydescr = op.getdescr() - array = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_array(array, arraydescr) - if opnum == rop.ARRAYLEN_GC: - return - arraylength = cpu.bh_arraylen_gc(array, arraydescr) - - elif (opnum == rop.STRGETITEM or - opnum == rop.STRLEN): - string = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_string(string) - if opnum == rop.STRLEN: - return - arraylength = cpu.bh_strlen(string) - - elif (opnum == rop.UNICODEGETITEM or - opnum == rop.UNICODELEN): - unicode = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_unicode(unicode) - if opnum == rop.UNICODELEN: - return - arraylength = cpu.bh_unicodelen(unicode) - - else: - return - - index = self.get_constant_box(op.getarg(1)).getint() - if not (0 <= index < arraylength): - raise SpeculativeError - def getrecentops(self, opnum): if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: opnum = opnum - rop._OVF_FIRST diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -521,8 +521,8 @@ def test_getfield(self): graph = self.build_dependency(""" [p0, p1] # 0: 1,2,5 - p2 = getfield_gc_r(p0) # 1: 3,5 - p3 = getfield_gc_r(p0) # 2: 4 + p2 = getfield_gc_r(p0, descr=valuedescr) # 1: 3,5 + p3 = getfield_gc_r(p0, descr=valuedescr) # 2: 4 guard_nonnull(p2) [p2] # 3: 4,5 guard_nonnull(p3) [p3] # 4: 5 jump(p0,p2) # 5: @@ -532,10 +532,10 @@ def test_cyclic(self): graph = self.build_dependency(""" [p0, p1, p5, p6, p7, p9, p11, p12] # 0: 1,6 - p13 = getfield_gc_r(p9) # 1: 2,5 + p13 = getfield_gc_r(p9, descr=valuedescr) # 1: 2,5 guard_nonnull(p13) [] # 2: 4,5 - i14 = getfield_gc_i(p9) # 3: 5 - p15 = getfield_gc_r(p13) # 4: 5 + i14 = getfield_gc_i(p9, descr=valuedescr) # 3: 5 + p15 = getfield_gc_r(p13, descr=valuedescr) # 4: 5 guard_class(p15, 14073732) [p1, p0, p9, i14, p15, p13, p5, p6, p7] # 5: 6 jump(p0,p1,p5,p6,p7,p9,p11,p12) # 6: """) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -955,12 +955,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_getfield_gc_1(self): ops = """ [i] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + p1 = new_with_vtable(descr=nodesize3) + setfield_gc(p1, i, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) jump(i1) """ expected = """ @@ -969,17 +969,16 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ [i] - jump(5) - """ - self.node.value = 5 + jump(7) + """ self.optimize_loop(ops, expected) def test_getfield_gc_nonpure_2(self): @@ -1343,7 +1342,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -1355,7 +1354,7 @@ expected = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -1597,7 +1596,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -1608,7 +1607,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -5076,7 +5075,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i0 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i0 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i1 = call_pure_i(123, i0, descr=nonwritedescr) finish(i1) """ @@ -5462,15 +5461,15 @@ def test_getarrayitem_gc_pure_not_invalidated(self): ops = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) - i2 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i2 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(i2) jump(p0) """ expected = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) escape_n(i1) jump(p0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1409,12 +1409,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_pure_getfield_gc_1(self): ops = """ [i] p1 = new_with_vtable(descr=nodesize) setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + i1 = getfield_gc_i(p1, descr=valuedescr) jump(i1) """ expected = """ @@ -1423,10 +1423,10 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_pure_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ @@ -1436,20 +1436,20 @@ self.node.value = 5 self.optimize_loop(ops, expected) - def test_getfield_gc_pure_3(self): + def test_pure_getfield_gc_3(self): ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) - p3 = getfield_gc_pure_r(p1, descr=nextdescr) + p3 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p3) jump() """ expected = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) escape_n(p2) jump() @@ -2319,7 +2319,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -2332,7 +2332,7 @@ preamble = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2340,11 +2340,12 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - jump(p1, i1, i2, p3, i3) - """ - expected = """ - [p1, i1, i2, p3, i3] + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] # + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2352,8 +2353,7 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - ifoo = arraylen_gc(p3, descr=arraydescr) # killed by the backend - jump(p1, i1, i2, p3, i3) + jump(p1, i1, i2, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2669,7 +2669,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -2680,7 +2680,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -3302,8 +3302,8 @@ [p8, p11, i24] p26 = new(descr=ssize) setfield_gc(p26, i24, descr=adescr) - i34 = getfield_gc_pure_i(p11, descr=abisdescr) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=abisdescr) + i35 = getfield_gc_i(p26, descr=adescr) i36 = int_add_ovf(i34, i35) guard_no_overflow() [] jump(p8, p11, i35) @@ -3330,8 +3330,8 @@ setfield_gc(p26, i24, descr=adescr) i28 = int_add(i17, 1) setfield_gc(p8, i28, descr=valuedescr) - i34 = getfield_gc_pure_i(p11, descr=valuedescr3) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=valuedescr3) + i35 = getfield_gc_i(p26, descr=adescr) guard_nonnull(p12) [] i36 = int_add_ovf(i34, i35) guard_no_overflow() [] @@ -3522,14 +3522,14 @@ def test_residual_call_does_not_invalidate_immutable_caches(self): ops = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) - i3 = getfield_gc_pure_i(p1, descr=valuedescr3) + i3 = getfield_gc_i(p1, descr=valuedescr3) jump(p1) """ expected_preamble = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) jump(p1, i1) """ @@ -4878,11 +4878,11 @@ def test_add_sub_ovf_virtual_unroll(self): ops = """ [p15] - i886 = getfield_gc_pure_i(p15, descr=valuedescr) + i886 = getfield_gc_i(p15, descr=valuedescr) i888 = int_sub_ovf(i886, 1) guard_no_overflow() [] escape_n(i888) - i4360 = getfield_gc_pure_i(p15, descr=valuedescr) + i4360 = getfield_gc_i(p15, descr=valuedescr) i4362 = int_add_ovf(i4360, 1) guard_no_overflow() [] i4360p = int_sub_ovf(i4362, 1) @@ -4972,18 +4972,16 @@ def test_pure(self): ops = """ [p42] - p53 = getfield_gc_r(ConstPtr(myptr), descr=nextdescr) - p59 = getfield_gc_pure_r(p53, descr=valuedescr) + p53 = getfield_gc_r(ConstPtr(myptr3), descr=nextdescr3) + p59 = getfield_gc_r(p53, descr=valuedescr3) i61 = call_i(1, p59, descr=nonwritedescr) jump(p42) """ expected = """ - [p42, p59] - i61 = call_i(1, p59, descr=nonwritedescr) - jump(p42, p59) - - """ - self.node.value = 5 + [p42] + i61 = call_i(1, 7, descr=nonwritedescr) + jump(p42) + """ self.optimize_loop(ops, expected) def test_complains_getfieldpure_setfield(self): @@ -4992,7 +4990,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) setfield_gc(p1, p3, descr=nextdescr) jump(p3) """ @@ -5002,7 +5000,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5010,7 +5008,7 @@ expected = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5021,7 +5019,7 @@ ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) p3 = escape_r() setfield_gc(p3, p1, descr=nextdescr) jump() @@ -6167,14 +6165,14 @@ def test_bug_unroll_with_immutables(self): ops = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) p1 = new_with_vtable(descr=immut_descr) setfield_gc(p1, 1242, descr=immut_intval) jump(p1) """ preamble = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) jump() """ expected = """ @@ -7229,13 +7227,13 @@ [p0, p1, i0] quasiimmut_field(p0, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ expected = """ [p0, p1, i0] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ @@ -7246,7 +7244,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) escape_n(i1) jump() """ @@ -7298,11 +7296,11 @@ [i0a, i0b] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i2 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7325,11 +7323,11 @@ setfield_gc(p, 421, descr=quasifielddescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p, descr=quasifielddescr) + i1 = getfield_gc_i(p, descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(p, descr=quasifielddescr) + i2 = getfield_gc_i(p, descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7568,7 +7566,7 @@ def test_forced_virtual_pure_getfield(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=valuedescr) + p1 = getfield_gc_r(p0, descr=valuedescr3) jump(p1) """ self.optimize_loop(ops, ops) @@ -7578,7 +7576,7 @@ p1 = new_with_vtable(descr=nodesize3) setfield_gc(p1, p0, descr=valuedescr3) escape_n(p1) - p2 = getfield_gc_pure_r(p1, descr=valuedescr3) + p2 = getfield_gc_r(p1, descr=valuedescr3) escape_n(p2) jump(p0) """ @@ -7852,14 +7850,14 @@ def test_loopinvariant_getarrayitem_gc_pure(self): ops = """ [p9, i1] - i843 = getarrayitem_gc_pure_i(p9, i1, descr=arraydescr) + i843 = getarrayitem_gc_pure_i(p9, i1, descr=arrayimmutdescr) call_n(i843, descr=nonwritedescr) jump(p9, i1) """ expected = """ [p9, i1, i843] call_n(i843, descr=nonwritedescr) - ifoo = arraylen_gc(p9, descr=arraydescr) + ifoo = arraylen_gc(p9, descr=arrayimmutdescr) jump(p9, i1, i843) """ self.optimize_loop(ops, expected) @@ -7868,7 +7866,7 @@ ops = """ [p0] p1 = getfield_gc_r(p0, descr=nextdescr) - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) call_n(p2, descr=nonwritedescr) jump(p0) """ @@ -7883,14 +7881,14 @@ i1 = arraylen_gc(p1, descr=gcarraydescr) i2 = int_ge(i1, 8) guard_true(i2) [] - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) - jump(p2, p1) - """ - expected = """ - [p0, p2, p1] + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) + jump(p1, p2) + """ + expected = """ + [p0, p1, p2] call_n(p2, descr=nonwritedescr) i3 = arraylen_gc(p1, descr=gcarraydescr) # Should be killed by backend - jump(p0, p2, p1) + jump(p0, p1, p2) """ self.optimize_loop(ops, expected, expected_short=short) @@ -8065,7 +8063,7 @@ def test_dont_mixup_equal_boxes(self): ops = """ [p8] - i9 = getfield_gc_pure_i(p8, descr=valuedescr) + i9 = getfield_gc_i(p8, descr=valuedescr3) i10 = int_gt(i9, 0) guard_true(i10) [] i29 = int_lshift(i9, 1) @@ -8160,9 +8158,9 @@ py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") ops = """ [p5, p8] - i9 = getfield_gc_pure_i(p5, descr=valuedescr) + i9 = getfield_gc_i(p5, descr=valuedescr) call_n(i9, descr=nonwritedescr) - i11 = getfield_gc_pure_i(p8, descr=valuedescr) + i11 = getfield_gc_i(p8, descr=valuedescr) i13 = int_add_ovf(i11, 1) guard_no_overflow() [] p22 = new_with_vtable(descr=nodesize) @@ -8201,14 +8199,14 @@ ops = """ [p0] p10 = getfield_gc_r(ConstPtr(myptr), descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(ConstPtr(myptr), ConstPtr(myptr2), descr=otherdescr) + setfield_gc(ConstPtr(myptr), ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8232,14 +8230,14 @@ ops = """ [p0] p10 = getfield_gc_r(p0, descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(p0, ConstPtr(myptr2), descr=otherdescr) + setfield_gc(p0, ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8624,17 +8622,17 @@ [p10] p52 = getfield_gc_r(p10, descr=nextdescr) # inst_storage p54 = getarrayitem_gc_r(p52, 0, descr=arraydescr) - p69 = getfield_gc_pure_r(p54, descr=otherdescr) # inst_w_function + p69 = getfield_gc_r(p54, descr=otherdescr) # inst_w_function quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] p106 = new_with_vtable(descr=nodesize) p108 = new_array(3, descr=arraydescr) p110 = new_with_vtable(descr=nodesize) - setfield_gc(p110, ConstPtr(myptr2), descr=otherdescr) # inst_w_function + setfield_gc(p110, ConstPtr(myptrb), descr=otherdescr) # inst_w_function setarrayitem_gc(p108, 0, p110, descr=arraydescr) setfield_gc(p106, p108, descr=nextdescr) # inst_storage jump(p106) @@ -8650,7 +8648,7 @@ [p69] quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] jump(ConstPtr(myptr)) """ @@ -8852,13 +8850,13 @@ def test_virtual_back_and_forth(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=bdescr) + p1 = getfield_gc_r(p0, descr=nextdescr3) ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) - ix = getarrayitem_gc_pure_i(p2, 0, descr=arraydescr) + ix = getarrayitem_gc_pure_i(p2, 0, descr=arrayimmutdescr) pfoo = getfield_gc_r(ptemp, descr=nextdescr) - guard_value(pfoo, ConstPtr(myarray)) [] + guard_value(pfoo, ConstPtr(immutarray)) [] ifoo = int_add(ix, 13) escape_n(ix) jump(p0) @@ -8888,13 +8886,13 @@ def test_constant_float_pure(self): ops = """ [p0] - f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarraydescr) + f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarrayimmutdescr) guard_value(f0, 1.03) [] jump(p0) """ expected = """ [p0] - ifoo = arraylen_gc(p0, descr=floatarraydescr) + ifoo = arraylen_gc(p0, descr=floatarrayimmutdescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -9102,7 +9100,7 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getfield_gc_pure_i(p0, descr=valuedescr) + getfield_gc_i(p0, descr=valuedescr3) i3 = int_sub(i1, 1) jump(NULL, i3) """ @@ -9113,9 +9111,9 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getfield_gc_pure_i(p0, descr=valuedescr) + getfield_gc_i(p0, descr=valuedescr3) i3 = int_sub(i1, 1) - jump(ConstPtr(myptr4), i3) + jump(ConstPtr(myptr2), i3) """ py.test.raises(InvalidLoop, self.optimize_loop, ops, ops) @@ -9265,9 +9263,126 @@ guard_value(i1, 5) [] jump() """ - a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE)), 5, zero=True) + a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE3)), 5, zero=True) self.optimize_loop(ops, expected, jump_values=[a]) + def test_large_number_of_immutable_references(self): + ops = """ + [p0] + i0 = getfield_gc_i(p0, descr=bigadescr) + i1 = getfield_gc_i(p0, descr=bigbdescr) + i2 = getfield_gc_i(p0, descr=bigcdescr) + i3 = getfield_gc_i(p0, descr=bigddescr) + i4 = getfield_gc_i(p0, descr=bigedescr) + i5 = getfield_gc_i(p0, descr=bigfdescr) + i6 = getfield_gc_i(p0, descr=biggdescr) + i7 = getfield_gc_i(p0, descr=bighdescr) + i8 = getfield_gc_i(p0, descr=bigidescr) + i9 = getfield_gc_i(p0, descr=bigjdescr) + i10 = getfield_gc_i(p0, descr=bigkdescr) + i11 = getfield_gc_i(p0, descr=bigldescr) + i12 = getfield_gc_i(p0, descr=bigmdescr) + i13 = getfield_gc_i(p0, descr=bigndescr) + i14 = getfield_gc_i(p0, descr=bigodescr) + i15 = getfield_gc_i(p0, descr=bigpdescr) + i16 = getfield_gc_i(p0, descr=bigqdescr) + i17 = getfield_gc_i(p0, descr=bigrdescr) + i18 = getfield_gc_i(p0, descr=bigsdescr) + i19 = getfield_gc_i(p0, descr=bigtdescr) + i20 = getfield_gc_i(p0, descr=bigudescr) + i21 = getfield_gc_i(p0, descr=bigvdescr) + i22 = getfield_gc_i(p0, descr=bigwdescr) + i23 = getfield_gc_i(p0, descr=bigxdescr) + i24 = getfield_gc_i(p0, descr=bigydescr) + i25 = getfield_gc_i(p0, descr=bigzdescr) + i27 = getfield_gc_i(p0, descr=bigbdescr) + i28 = getfield_gc_i(p0, descr=bigcdescr) + i29 = getfield_gc_i(p0, descr=bigddescr) + i30 = getfield_gc_i(p0, descr=bigedescr) + i31 = getfield_gc_i(p0, descr=bigfdescr) + i32 = getfield_gc_i(p0, descr=biggdescr) + i33 = getfield_gc_i(p0, descr=bighdescr) + i34 = getfield_gc_i(p0, descr=bigidescr) + i35 = getfield_gc_i(p0, descr=bigjdescr) + i36 = getfield_gc_i(p0, descr=bigkdescr) + i37 = getfield_gc_i(p0, descr=bigldescr) + i38 = getfield_gc_i(p0, descr=bigmdescr) + i39 = getfield_gc_i(p0, descr=bigndescr) + i40 = getfield_gc_i(p0, descr=bigodescr) + i41 = getfield_gc_i(p0, descr=bigpdescr) + i42 = getfield_gc_i(p0, descr=bigqdescr) + i43 = getfield_gc_i(p0, descr=bigrdescr) + i44 = getfield_gc_i(p0, descr=bigsdescr) + i45 = getfield_gc_i(p0, descr=bigtdescr) + i46 = getfield_gc_i(p0, descr=bigudescr) + i47 = getfield_gc_i(p0, descr=bigvdescr) + i48 = getfield_gc_i(p0, descr=bigwdescr) + i49 = getfield_gc_i(p0, descr=bigxdescr) + i50 = getfield_gc_i(p0, descr=bigydescr) + i51 = getfield_gc_i(p0, descr=bigzdescr) + i26 = getfield_gc_i(p0, descr=bigadescr) + i99 = int_add(i26, i51) + escape_i(i27) + escape_i(i28) + escape_i(i29) + escape_i(i30) + escape_i(i31) + escape_i(i32) + escape_i(i33) + escape_i(i34) + escape_i(i35) + escape_i(i36) + escape_i(i37) + escape_i(i38) + escape_i(i39) + escape_i(i40) + escape_i(i41) + escape_i(i42) + escape_i(i43) + escape_i(i44) + escape_i(i45) + escape_i(i46) + escape_i(i47) + escape_i(i48) + escape_i(i49) + escape_i(i50) + escape_i(i51) + escape_i(i26) + escape_i(i99) + jump(p0) + """ + expected = """ + [p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99] + escape_i(i1) + escape_i(i2) + escape_i(i3) + escape_i(i4) + escape_i(i5) + escape_i(i6) + escape_i(i7) + escape_i(i8) + escape_i(i9) + escape_i(i10) + escape_i(i11) + escape_i(i12) + escape_i(i13) + escape_i(i14) + escape_i(i15) + escape_i(i16) + escape_i(i17) + escape_i(i18) + escape_i(i19) + escape_i(i20) + escape_i(i21) + escape_i(i22) + escape_i(i23) + escape_i(i24) + escape_i(i25) + escape_i(i0) + escape_i(i99) + jump(p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99) + """ + self.optimize_loop(ops, expected) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -220,16 +220,16 @@ def test_double_getfield_plus_pure(self): loop = """ [p0] - pc = getfield_gc_pure_r(p0, descr=nextdescr) + pc = getfield_gc_r(p0, descr=nextdescr3) escape_n(p0) # that should flush the caches - p1 = getfield_gc_r(pc, descr=nextdescr) - i0 = getfield_gc_i(p1, descr=valuedescr) + p1 = getfield_gc_r(pc, descr=nextdescr3) + i0 = getfield_gc_i(p1, descr=valuedescr3) jump(p0) """ es, loop, preamble = self.optimize(loop) assert len(es.short_boxes) == 4 # both getfields are available as - # well as getfield_gc_pure + # well as getfield_gc def test_p123_anti_nested(self): loop = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,4 +1,4 @@ -import py, random +import py, random, string from rpython.rlib.debug import debug_print from rpython.rtyper.lltypesystem import lltype, llmemory, rffi @@ -122,7 +122,14 @@ ('value', lltype.Signed), ('next', lltype.Ptr(NODE3)), hints={'immutable': True})) - + + big_fields = [('big' + i, lltype.Signed) for i in string.ascii_lowercase] + BIG = lltype.GcForwardReference() + BIG.become(lltype.GcStruct('BIG', *big_fields, hints={'immutable': True})) + + for field, _ in big_fields: + locals()[field + 'descr'] = cpu.fielddescrof(BIG, field) + node = lltype.malloc(NODE) node.value = 5 node.next = node @@ -133,16 +140,25 @@ node2.parent.parent.typeptr = node_vtable2 node2addr = lltype.cast_opaque_ptr(llmemory.GCREF, node2) myptr = lltype.cast_opaque_ptr(llmemory.GCREF, node) - mynode2 = lltype.malloc(NODE) + mynodeb = lltype.malloc(NODE) myarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(lltype.GcArray(lltype.Signed), 13, zero=True)) - mynode2.parent.typeptr = node_vtable - myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode2) - mynode3 = lltype.malloc(NODE2) - mynode3.parent.parent.typeptr = node_vtable2 + mynodeb.parent.typeptr = node_vtable + myptrb = lltype.cast_opaque_ptr(llmemory.GCREF, mynodeb) + myptr2 = lltype.malloc(NODE2) + myptr2.parent.parent.typeptr = node_vtable2 + myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, myptr2) + nullptr = lltype.nullptr(llmemory.GCREF.TO) + + mynode3 = lltype.malloc(NODE3) + mynode3.parent.typeptr = node_vtable3 + mynode3.value = 7 + mynode3.next = mynode3 myptr3 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode3) # a NODE2 mynode4 = lltype.malloc(NODE3) mynode4.parent.typeptr = node_vtable3 myptr4 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode4) # a NODE3 + + nullptr = lltype.nullptr(llmemory.GCREF.TO) #nodebox2 = InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, node2)) nodesize = cpu.sizeof(NODE, node_vtable) @@ -203,7 +219,6 @@ arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) int32arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.INT)) int16arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.SHORT)) - floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) float32arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.SingleFloat)) arraydescr_tid = arraydescr.get_type_id() array = lltype.malloc(lltype.GcArray(lltype.Signed), 15, zero=True) @@ -212,6 +227,12 @@ array2ref = lltype.cast_opaque_ptr(llmemory.GCREF, array2) gcarraydescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF)) gcarraydescr_tid = gcarraydescr.get_type_id() + floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) + + arrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed, hints={"immutable": True})) + immutarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(arrayimmutdescr.A, 13, zero=True)) + gcarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF, hints={"immutable": True})) + floatarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Float, hints={"immutable": True})) # a GcStruct not inheriting from OBJECT tpl = lltype.malloc(S, zero=True) @@ -244,7 +265,7 @@ tsize = cpu.sizeof(T, None) cdescr = cpu.fielddescrof(T, 'c') ddescr = cpu.fielddescrof(T, 'd') - arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE))) + arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE3))) U = lltype.GcStruct('U', ('parent', OBJECT), diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -1103,8 +1103,8 @@ jump(p0) """ self.optimize_bridge(loops, bridge, loops[0], 'Loop0', [self.myptr]) - self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr3]) - self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr3]) + self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr2]) + self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr2]) self.optimize_bridge(loops, loops[0], loops[0], 'Loop0', [self.nullptr]) self.optimize_bridge(loops, loops[1], loops[1], 'Loop1', [self.nullptr]) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -188,12 +188,6 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - # note: the following line does not mean that the two operations are - # completely equivalent, because GETFIELD_GC_PURE is_always_pure(). - optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_I - def optimize_SETFIELD_GC(self, op): struct = op.getarg(0) opinfo = self.getptrinfo(struct) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -653,46 +653,37 @@ @arguments("box", "descr") def opimpl_getfield_gc_i(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_I, fielddescr, box) + return ConstInt(resbox) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_I, box, fielddescr, 'i') + + @arguments("box", "descr") + def opimpl_getfield_gc_f(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + resvalue = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_F, fielddescr, box) + return ConstFloat(resvalue) + return self._opimpl_getfield_gc_any_pureornot( + rop.GETFIELD_GC_F, box, fielddescr, 'f') + @arguments("box", "descr") def opimpl_getfield_gc_r(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + val = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_R, fielddescr, box) + return ConstPtr(val) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_R, box, fielddescr, 'r') - @arguments("box", "descr") - def opimpl_getfield_gc_f(self, box, fielddescr): - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_i_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_I, fielddescr, box) - return ConstInt(resbox) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_I, box, fielddescr, 'i') - - @arguments("box", "descr") - def opimpl_getfield_gc_f_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - resvalue = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_F, fielddescr, box) - return ConstFloat(resvalue) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_r_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - val = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_R, fielddescr, box) - return ConstPtr(val) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_R, box, fielddescr, 'r') + + opimpl_getfield_gc_i_pure = opimpl_getfield_gc_i + opimpl_getfield_gc_r_pure = opimpl_getfield_gc_r + opimpl_getfield_gc_f_pure = opimpl_getfield_gc_f @arguments("box", "box", "descr") def opimpl_getinteriorfield_gc_i(self, array, index, descr): @@ -733,7 +724,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info - opnum = OpHelpers.getfield_pure_for_descr(fielddescr) + opnum = OpHelpers.getfield_for_descr(fielddescr) if (ginfo is not None and fielddescr in ginfo.green_field_descrs and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't @@ -2104,17 +2095,7 @@ profiler = self.staticdata.profiler profiler.count_ops(opnum) resvalue = executor.execute(self.cpu, self, opnum, descr, *argboxes) - # - is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST - if not is_pure: - if (opnum == rop.GETFIELD_RAW_I or - opnum == rop.GETFIELD_RAW_R or - opnum == rop.GETFIELD_RAW_F or - opnum == rop.GETARRAYITEM_RAW_I or - opnum == rop.GETARRAYITEM_RAW_F): - is_pure = descr.is_always_pure() - # - if is_pure: + if OpHelpers.is_pure_with_descr(opnum, descr): return self._record_helper_pure(opnum, resvalue, descr, *argboxes) if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: return self._record_helper_ovf(opnum, resvalue, descr, *argboxes) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -231,7 +231,7 @@ class AbstractResOpOrInputArg(AbstractValue): _attrs_ = ('_forwarded',) - _forwarded = None # either another resop or OptInfo + _forwarded = None # either another resop or OptInfo def get_forwarded(self): return self._forwarded @@ -412,6 +412,8 @@ return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST def is_always_pure(self): + # Tells whether an operation is pure based solely on the opcode. + # Other operations (e.g. getfield ops) may be pure in some cases are well. return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): @@ -434,9 +436,7 @@ return self.opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, - rop.GETFIELD_GC_R, rop.GETFIELD_GC_PURE_I, - rop.GETFIELD_GC_PURE_R, rop.GETFIELD_GC_PURE_F) + return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R) def is_getarrayitem(self): return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, @@ -1154,7 +1154,6 @@ 'ARRAYLEN_GC/1d/i', 'STRLEN/1/i', 'STRGETITEM/2/i', - 'GETFIELD_GC_PURE/1d/rfi', 'GETARRAYITEM_GC_PURE/2d/rfi', #'GETFIELD_RAW_PURE/1d/rfi', these two operations not useful and #'GETARRAYITEM_RAW_PURE/2d/fi', dangerous when unrolling speculatively @@ -1602,14 +1601,6 @@ return rop.CALL_LOOPINVARIANT_N @staticmethod - def getfield_pure_for_descr(descr): - if descr.is_pointer_field(): - return rop.GETFIELD_GC_PURE_R - elif descr.is_float_field(): - return rop.GETFIELD_GC_PURE_F - return rop.GETFIELD_GC_PURE_I - - @staticmethod def getfield_for_descr(descr): if descr.is_pointer_field(): return rop.GETFIELD_GC_R @@ -1760,4 +1751,26 @@ opnum = rop.VEC_UNPACK_F return VecOperationNew(opnum, args, datatype, bytesize, signed, count) + @staticmethod + def is_pure_getfield(opnum, descr): + if (opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETFIELD_GC_R): + return descr is not None and descr.is_always_pure() + return False + @staticmethod + def is_pure_with_descr(opnum, descr): + is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST + if not is_pure: + if (opnum == rop.GETFIELD_RAW_I or + opnum == rop.GETFIELD_RAW_R or + opnum == rop.GETFIELD_RAW_F or + opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_R or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETARRAYITEM_RAW_I or + opnum == rop.GETARRAYITEM_RAW_F): + is_pure = descr.is_always_pure() + return is_pure + diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -320,7 +320,7 @@ assert res == 252 self.check_trace_count(1) self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, - 'getfield_gc_pure_i': 1, 'int_mul': 1, + 'getfield_gc_i': 1, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) def test_loops_are_transient(self): @@ -1405,7 +1405,7 @@ return tup[1] res = self.interp_operations(f, [3, 5]) assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure_i=0) + self.check_operations_history(setfield_gc=2, getfield_gc_i=0) def test_oosend_look_inside_only_one(self): class A: @@ -2522,7 +2522,7 @@ if counter > 10: return 7 assert self.meta_interp(build, []) == 7 - self.check_resops(getfield_gc_pure_r=2) + self.check_resops(getfield_gc_r=2) def test_args_becomming_equal(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -19,7 +19,7 @@ return y.x + 5 res = self.interp_operations(f, [23]) assert res == 28 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=1, int_add=1) + self.check_operations_history(getfield_gc_i=1, int_add=1) def test_fields_subclass(self): class X(object): @@ -41,8 +41,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def f(x, y): # this time, the field 'x' only shows up on subclass 'Y' @@ -50,8 +49,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def test_array(self): class X(object): @@ -66,8 +64,7 @@ return a.y[index] res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_index_error(self): class X(object): @@ -89,8 +86,7 @@ return a.get(index) res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_in_immutable(self): class X(object): @@ -106,8 +102,7 @@ return y.lst[index] + y.y + 5 res = self.interp_operations(f, [23, 0], listops=True) assert res == 23 + 24 + 5 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getfield_gc_pure_i=1, + self.check_operations_history(getfield_gc_r=1, getfield_gc_i=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1, int_add=3) diff --git a/rpython/jit/metainterp/test/test_quasiimmut.py b/rpython/jit/metainterp/test/test_quasiimmut.py --- a/rpython/jit/metainterp/test/test_quasiimmut.py +++ b/rpython/jit/metainterp/test/test_quasiimmut.py @@ -74,7 +74,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -101,7 +101,7 @@ res = self.meta_interp(f, [100, 7], enable_opts="") assert res == 700 # there should be no getfields, even though optimizations are turned off - self.check_resops(guard_not_invalidated=1, getfield_gc=0) + self.check_resops(guard_not_invalidated=1) def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) @@ -124,8 +124,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, - getfield_gc_pure_i=2) + self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, getfield_gc_i=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -156,7 +155,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -208,7 +207,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=0, getfield_gc=0) + self.check_resops(guard_not_invalidated=0) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -234,7 +233,7 @@ assert g(100, 7) == 700707 res = self.meta_interp(g, [100, 7]) assert res == 700707 - self.check_resops(guard_not_invalidated=4, getfield_gc=0) + self.check_resops(guard_not_invalidated=4) def test_invalidate_while_running(self): jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) @@ -348,7 +347,7 @@ res = self.meta_interp(f, [100, 30]) assert res == 6019 self.check_resops(guard_not_invalidated=8, guard_not_forced=0, - call_may_force=0, getfield_gc=0) + call_may_force=0) def test_list_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -374,8 +373,7 @@ getarrayitem_gc_pure_r=0, getarrayitem_gc_i=0, getarrayitem_gc_r=0, - getfield_gc_i=0, getfield_gc_pure_i=0, - getfield_gc_r=0, getfield_gC_pure_r=0) + getfield_gc_i=0, getfield_gc_r=0) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -405,9 +403,7 @@ assert res == 700 # operations must have been removed by the frontend self.check_resops(getarrayitem_gc_pure_i=0, guard_not_invalidated=1, - getarrayitem_gc_i=0, - getfield_gc=0, getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + getarrayitem_gc_i=0, getfield_gc_i=0, getfield_gc_r=0) def test_list_length_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -436,10 +436,10 @@ return p.x[0] + p.x[1] res = self.interp_operations(fn, [7]) assert res == 7 + 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == -7 - 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) def test_heap_caching_and_elidable_function(self): class A: @@ -517,12 +517,12 @@ return a1[0] + a2[0] + gn(a1, a2) res = self.interp_operations(fn, [7]) assert res == 2 * 7 + 2 * 6 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == 2 * -7 + 2 * -8 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) def test_heap_caching_multiple_arrays(self): class Gbl(object): diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1077,7 +1077,7 @@ res = self.meta_interp(f, [], repeat=7) assert res == f() - def test_getfield_gc_pure_nobug(self): + def test_pure_getfield_gc_nobug(self): mydriver = JitDriver(reds=['i', 's', 'a'], greens=[]) class A(object): diff --git a/rpython/rlib/test/test_rpath.py b/rpython/rlib/test/test_rpath.py --- a/rpython/rlib/test/test_rpath.py +++ b/rpython/rlib/test/test_rpath.py @@ -68,8 +68,8 @@ assert rpath._nt_rabspath('d:\\foo\\bar\\..') == 'd:\\foo' assert rpath._nt_rabspath('d:\\foo\\bar\\..\\x') == 'd:\\foo\\x' curdrive = _ = rpath._nt_rsplitdrive(os.getcwd()) - assert len(curdrive) == 2 and curdrive[1] == ':' - assert rpath.rabspath('\\foo') == '%s\\foo' % curdrive + assert len(curdrive) == 2 and curdrive[0][1] == ':' + assert rpath.rabspath('\\foo') == '%s\\foo' % curdrive[0] def test_risabs_posix(): assert rpath._posix_risabs('/foo/bar') diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -151,7 +151,7 @@ # Increase stack size, for the linker and the stack check code. stack_size = 8 << 20 # 8 Mb - self.link_flags.append('/STACK:%d' % stack_size) + self.link_flags = self.link_flags + ('/STACK:%d' % stack_size,) # The following symbol is used in c/src/stack.h self.cflags.append('/DMAX_STACK_SIZE=%d' % (stack_size - 1024)) From pypy.commits at gmail.com Tue Jan 26 03:07:50 2016 From: pypy.commits at gmail.com (Raemi) Date: Tue, 26 Jan 2016 00:07:50 -0800 (PST) Subject: [pypy-commit] pypy stmgc-c8: clarification Message-ID: <56a72956.85e41c0a.4104c.ffffc1d8@mx.google.com> Author: Remi Meier Branch: stmgc-c8 Changeset: r81945:23f42ac860b5 Date: 2016-01-26 09:08 +0100 http://bitbucket.org/pypy/pypy/changeset/23f42ac860b5/ Log: clarification diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -140,6 +140,12 @@ # was necessary in C7 for others to commit, but in C8 it is only # necessary for requesting major GCs. I think we better avoid this # overhead for tight loops and wait a bit longer in that case. + # ^^^ is not the entire truth: we currently measure the amount of work + # done in a transaction by number of bytes allocated. It means that + # now, tight loops not doing any allocation are not accounted for. + # However, given that not doing these allocations improves + # lee_router_tm.py by a factor of 2.5x, we better deal with it in + # another way. pass # if not self.does_any_allocation: # # do a fake allocation since this is needed to check From pypy.commits at gmail.com Tue Jan 26 03:08:16 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 26 Jan 2016 00:08:16 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed print statements, typo, removed push floats from stacklet because there is no need to save them Message-ID: <56a72970.d4811c0a.40e2e.ffff818d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81946:62aeb645010b Date: 2016-01-26 09:07 +0100 http://bitbucket.org/pypy/pypy/changeset/62aeb645010b/ Log: removed print statements, typo, removed push floats from stacklet because there is no need to save them diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -287,9 +287,3 @@ all_mnemonic_codes.update(memory_mnemonic_codes) all_mnemonic_codes.update(floatingpoint_mnemonic_codes) all_mnemonic_codes.update(branch_mnemonic_codes) - - -if __name__ == "__main__": - print("%d instructions:" % len(all_mnemonic_codes)) - for name, (typeinstr, _) in all_mnemonic_codes.items(): - print(" %s\t(type: %s)" % (name, typeinstr)) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -131,7 +131,7 @@ if size == -1: size = self.size if size >= 2**19: - msg = '[S390X/literalpool] size exceeded %d >= %d\n' % (size, 2**19-8) + msg = '[S390X/literalpool] size exceeded %d >= %d\n' % (size, 2**19) if we_are_translated(): llop.debug_print(lltype.Void, msg) raise PoolOverflow(msg) @@ -180,13 +180,8 @@ asm.mc.write('\x00' * self.size) wrote = 0 for val, offset in self.offset_map.items(): - if not we_are_translated(): - print('pool: %s at offset: %d' % (val, offset)) self.overwrite_64(asm.mc, offset, val) wrote += 8 - # for the descriptors - if not we_are_translated(): - print "pool with %d quad words" % (self.size // 8) def overwrite_64(self, mc, index, value): index += self.pool_start diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1215,40 +1215,3 @@ raise NotImplementedError(msg) prepare_oplist = [notimplemented] * (rop._LAST + 1) - -if not we_are_translated(): - implemented_count = 0 - total_count = 0 - missing = [] - for key, value in rop.__dict__.items(): - key = key.lower() - if key.startswith('_'): - continue - total_count += 1 - methname = 'prepare_%s' % key - if hasattr(Regalloc, methname): - func = getattr(Regalloc, methname).im_func - prepare_oplist[value] = func - implemented_count += 1 - else: - if not methname.startswith('prepare_vec') and \ - not methname.startswith('prepare_get') and \ - not methname.startswith('prepare_raw') and \ - not methname.startswith('prepare_unicodesetitem') and \ - not methname.startswith('prepare_unicodegetitem') and \ - not methname.startswith('prepare_strgetitem') and \ - not methname.startswith('prepare_strsetitem') and \ - not methname.startswith('prepare_call_loopinvariant') and \ - not methname.startswith('prepare_call_pure') and \ - not methname.startswith('prepare_new') and \ - not methname.startswith('prepare_set'): - missing.append(methname) - else: - implemented_count += 1 - - if __name__ == '__main__': - for m in missing: - print(" " * 4 + m) - print - print("regalloc implements %d of %d = %.2f%% of all resops" % \ - (implemented_count, total_count, (100.0 * implemented_count / total_count))) diff --git a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h --- a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h @@ -12,10 +12,7 @@ "stmg 6,15,48(15)\n" - "std 0,128(15)\n" - "std 2,136(15)\n" - "std 4,144(15)\n" - "std 6,152(15)\n" + "lay 15,-160(15)\n" /* create stack frame */ "lgr 10, %[restore_state]\n" /* save 'restore_state' for later */ "lgr 11, %[extra]\n" /* save 'extra' for later */ @@ -23,9 +20,7 @@ "lgr 2, 15\n" /* arg 1: current (old) stack pointer */ "lgr 3, 11\n" /* arg 2: extra */ - "lay 15,-160(15)\n" /* create stack frame */ "basr 14, 14\n" /* call save_state() */ - "lay 15, 160(15)\n" /* destroy stack frame */ "cgij 2, 0, 8, zero\n" /* skip the rest if the return value is null */ @@ -38,10 +33,8 @@ "lgr 3, 11\n" /* arg 2: extra */ - "lay 15, -160(15)\n" /* create temp stack space for callee to use */ "lgr 14, 10\n" /* load restore_state */ "basr 14, 14\n" /* call restore_state() */ - "lay 15, 160(15)\n" /* destroy temp stack space */ /* The stack's content is now restored. */ @@ -49,13 +42,7 @@ /* Epilogue */ /* no need */ /* restore stack pointer */ - - "ld 0,128(15)\n" - "ld 2,136(15)\n" - "ld 4,144(15)\n" - "ld 6,152(15)\n" - - "lmg 6,15,48(15)\n" + "lmg 6,15,208(15)\n" : "=r"(result) /* output variable: expected to be r2 */ : [restore_state]"r"(restore_state), /* input variables */ diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -698,7 +698,7 @@ # related to libffi issue on s390x, we MUST # overwrite the full ffi result which is 64 bit # if not, this leaves garbage in the return value - # and qsort does not sorrt correctly + # and qsort does not sort correctly res = rffi.cast(rffi.SIGNEDP, ll_res) if a1 > a2: res[0] = 1 From pypy.commits at gmail.com Tue Jan 26 03:36:48 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 26 Jan 2016 00:36:48 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: simplified guard_class Message-ID: <56a73020.0c2e1c0a.18f57.ffffc9e4@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81947:1589c0b36069 Date: 2016-01-26 09:36 +0100 http://bitbucket.org/pypy/pypy/changeset/1589c0b36069/ Log: simplified guard_class diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -716,9 +716,6 @@ def _cmp_guard_gc_type(self, loc_ptr, expected_typeid): self._read_typeid(r.SCRATCH2, loc_ptr) assert 0 <= expected_typeid <= 0x7fffffff # 4 bytes are always enough - if expected_typeid > 0xffff: # if 2 bytes are not enough - self.mc.AGHI(r.SCRATCH2, l.imm(-(expected_typeid >> 16))) - expected_typeid = expected_typeid & 0xffff self.mc.cmp_op(r.SCRATCH2, l.imm(expected_typeid), imm=True, signed=False) From pypy.commits at gmail.com Tue Jan 26 08:02:10 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 26 Jan 2016 05:02:10 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: gotcha, guard nonnull class was implemented incorrectly (substracted value that should have been shifted 16 bits to the left) Message-ID: <56a76e52.88c8c20a.365f1.ffff83b3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81948:acf6ac9d0d3d Date: 2016-01-26 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/acf6ac9d0d3d/ Log: gotcha, guard nonnull class was implemented incorrectly (substracted value that should have been shifted 16 bits to the left) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -542,7 +542,6 @@ mc.restore_link() # So we return to our caller, conditionally if "EQ" mc.BCR(c.EQ, r.r14) - mc.trap() # debug if this is EVER executed! # # Else, jump to propagate_exception_path assert self.propagate_exception_path diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -682,11 +682,10 @@ def emit_guard_nonnull_class(self, op, arglocs, regalloc): self.mc.cmp_op(arglocs[0], l.imm(1), imm=True, signed=False) patch_pos = self.mc.currpos() - self.mc.trap() - self.mc.write('\x00' * 4) + self.mc.reserve_cond_jump(short=True) self._cmp_guard_class(op, arglocs, regalloc) pmc = OverwritingBuilder(self.mc, patch_pos, 1) - pmc.BRCL(c.LT, l.imm(self.mc.currpos() - patch_pos)) + pmc.BRC(c.LT, l.imm(self.mc.currpos() - patch_pos)) pmc.overwrite() self.guard_success_cc = c.EQ self._emit_guard(op, arglocs[2:]) @@ -716,6 +715,7 @@ def _cmp_guard_gc_type(self, loc_ptr, expected_typeid): self._read_typeid(r.SCRATCH2, loc_ptr) assert 0 <= expected_typeid <= 0x7fffffff # 4 bytes are always enough + # we can handle 4 byte compare immediate self.mc.cmp_op(r.SCRATCH2, l.imm(expected_typeid), imm=True, signed=False) From pypy.commits at gmail.com Tue Jan 26 10:46:06 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 26 Jan 2016 07:46:06 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed translation issue. reverted the stacklet changes (did not pass test) Message-ID: <56a794be.2851c20a.bd9fa.ffffc408@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81949:fb7bbc354de2 Date: 2016-01-26 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/fb7bbc354de2/ Log: fixed translation issue. reverted the stacklet changes (did not pass test) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1215,3 +1215,41 @@ raise NotImplementedError(msg) prepare_oplist = [notimplemented] * (rop._LAST + 1) + +if not we_are_translated(): + implemented_count = 0 + total_count = 0 + missing = [] + for key, value in rop.__dict__.items(): + key = key.lower() + if key.startswith('_'): + continue + total_count += 1 + methname = 'prepare_%s' % key + if hasattr(Regalloc, methname): + func = getattr(Regalloc, methname).im_func + prepare_oplist[value] = func + implemented_count += 1 + else: + if not methname.startswith('prepare_vec') and \ + not methname.startswith('prepare_get') and \ + not methname.startswith('prepare_raw') and \ + not methname.startswith('prepare_unicodesetitem') and \ + not methname.startswith('prepare_unicodegetitem') and \ + not methname.startswith('prepare_strgetitem') and \ + not methname.startswith('prepare_strsetitem') and \ + not methname.startswith('prepare_call_loopinvariant') and \ + not methname.startswith('prepare_call_pure') and \ + not methname.startswith('prepare_new') and \ + not methname.startswith('prepare_set'): + missing.append(methname) + else: + implemented_count += 1 + + if __name__ == '__main__': + for m in missing: + print(" " * 4 + m) + print + print("regalloc implements %d of %d = %.2f%% of all resops" % \ + (implemented_count, total_count, (100.0 * implemented_count / total_count))) + diff --git a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h --- a/rpython/translator/c/src/stacklet/switch_s390x_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_s390x_gcc.h @@ -12,7 +12,10 @@ "stmg 6,15,48(15)\n" - "lay 15,-160(15)\n" /* create stack frame */ + "std 0,128(15)\n" + "std 2,136(15)\n" + "std 4,144(15)\n" + "std 6,152(15)\n" "lgr 10, %[restore_state]\n" /* save 'restore_state' for later */ "lgr 11, %[extra]\n" /* save 'extra' for later */ @@ -20,7 +23,9 @@ "lgr 2, 15\n" /* arg 1: current (old) stack pointer */ "lgr 3, 11\n" /* arg 2: extra */ + "lay 15,-160(15)\n" /* create stack frame */ "basr 14, 14\n" /* call save_state() */ + "lay 15, 160(15)\n" /* destroy stack frame */ "cgij 2, 0, 8, zero\n" /* skip the rest if the return value is null */ @@ -33,8 +38,10 @@ "lgr 3, 11\n" /* arg 2: extra */ + "lay 15, -160(15)\n" /* create temp stack space for callee to use */ "lgr 14, 10\n" /* load restore_state */ "basr 14, 14\n" /* call restore_state() */ + "lay 15, 160(15)\n" /* destroy temp stack space */ /* The stack's content is now restored. */ @@ -42,7 +49,13 @@ /* Epilogue */ /* no need */ /* restore stack pointer */ - "lmg 6,15,208(15)\n" + + "ld 0,128(15)\n" + "ld 2,136(15)\n" + "ld 4,144(15)\n" + "ld 6,152(15)\n" + + "lmg 6,15,48(15)\n" : "=r"(result) /* output variable: expected to be r2 */ : [restore_state]"r"(restore_state), /* input variables */ From pypy.commits at gmail.com Tue Jan 26 10:46:08 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 26 Jan 2016 07:46:08 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged head Message-ID: <56a794c0.8205c20a.b6a17.ffffc2da@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81950:3d78eb62b0e5 Date: 2016-01-26 16:44 +0100 http://bitbucket.org/pypy/pypy/changeset/3d78eb62b0e5/ Log: merged head diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -542,7 +542,6 @@ mc.restore_link() # So we return to our caller, conditionally if "EQ" mc.BCR(c.EQ, r.r14) - mc.trap() # debug if this is EVER executed! # # Else, jump to propagate_exception_path assert self.propagate_exception_path diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -682,11 +682,10 @@ def emit_guard_nonnull_class(self, op, arglocs, regalloc): self.mc.cmp_op(arglocs[0], l.imm(1), imm=True, signed=False) patch_pos = self.mc.currpos() - self.mc.trap() - self.mc.write('\x00' * 4) + self.mc.reserve_cond_jump(short=True) self._cmp_guard_class(op, arglocs, regalloc) pmc = OverwritingBuilder(self.mc, patch_pos, 1) - pmc.BRCL(c.LT, l.imm(self.mc.currpos() - patch_pos)) + pmc.BRC(c.LT, l.imm(self.mc.currpos() - patch_pos)) pmc.overwrite() self.guard_success_cc = c.EQ self._emit_guard(op, arglocs[2:]) @@ -716,9 +715,7 @@ def _cmp_guard_gc_type(self, loc_ptr, expected_typeid): self._read_typeid(r.SCRATCH2, loc_ptr) assert 0 <= expected_typeid <= 0x7fffffff # 4 bytes are always enough - if expected_typeid > 0xffff: # if 2 bytes are not enough - self.mc.AGHI(r.SCRATCH2, l.imm(-(expected_typeid >> 16))) - expected_typeid = expected_typeid & 0xffff + # we can handle 4 byte compare immediate self.mc.cmp_op(r.SCRATCH2, l.imm(expected_typeid), imm=True, signed=False) From pypy.commits at gmail.com Tue Jan 26 13:11:17 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 10:11:17 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: Copy more code from cpyext-gc-support Message-ID: <56a7b6c5.e218c20a.e673a.03b5@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81951:23ac4994b18d Date: 2016-01-26 17:44 +0100 http://bitbucket.org/pypy/pypy/changeset/23ac4994b18d/ Log: Copy more code from cpyext-gc-support diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -192,7 +192,7 @@ class ApiFunction: def __init__(self, argtypes, restype, callable, error=_NOT_SPECIFIED, - c_name=None, gil=None): + c_name=None, gil=None, result_borrowed=False): self.argtypes = argtypes self.restype = restype self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype)) @@ -209,17 +209,15 @@ self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil + self.result_borrowed = result_borrowed + # + def get_llhelper(space): + return llhelper(self.functype, self.get_wrapper(space)) + self.get_llhelper = get_llhelper def _freeze_(self): return True - def get_llhelper(self, space): - llh = getattr(self, '_llhelper', None) - if llh is None: - llh = llhelper(self.functype, self.get_wrapper(space)) - self._llhelper = llh - return llh - @specialize.memo() def get_wrapper(self, space): wrapper = getattr(self, '_wrapper', None) @@ -232,7 +230,7 @@ return wrapper def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, external=True, - gil=None): + gil=None, result_borrowed=False): """ Declares a function to be exported. - `argtypes`, `restype` are lltypes and describe the function signature. @@ -261,13 +259,15 @@ rffi.cast(restype, 0) == 0) def decorate(func): + func._always_inline_ = 'try' func_name = func.func_name if external: c_name = None else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, - c_name=c_name, gil=gil) + c_name=c_name, gil=gil, + result_borrowed=result_borrowed) func.api_func = api_function if external: @@ -278,6 +278,10 @@ raise ValueError("function %s has no return value for exceptions" % func) def make_unwrapper(catch_exception): + # ZZZ is this whole logic really needed??? It seems to be only + # for RPython code calling PyXxx() functions directly. I would + # think that usually directly calling the function is clean + # enough now names = api_function.argnames types_names_enum_ui = unrolling_iterable(enumerate( zip(api_function.argtypes, @@ -289,52 +293,55 @@ from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import Reference newargs = () - to_decref = [] + keepalives = () assert len(args) == len(api_function.argtypes) for i, (ARG, is_wrapped) in types_names_enum_ui: input_arg = args[i] if is_PyObject(ARG) and not is_wrapped: - # build a reference - if input_arg is None: - arg = lltype.nullptr(PyObject.TO) - elif isinstance(input_arg, W_Root): - ref = make_ref(space, input_arg) - to_decref.append(ref) - arg = rffi.cast(ARG, ref) + # build a 'PyObject *' (not holding a reference) + if not is_pyobj(input_arg): + keepalives += (input_arg,) + arg = rffi.cast(ARG, as_xpyobj(space, input_arg)) + else: + arg = rffi.cast(ARG, input_arg) + elif is_PyObject(ARG) and is_wrapped: + # build a W_Root, possibly from a 'PyObject *' + if is_pyobj(input_arg): + arg = from_pyobj(space, input_arg) else: arg = input_arg - elif is_PyObject(ARG) and is_wrapped: - # convert to a wrapped object - if input_arg is None: - arg = input_arg - elif isinstance(input_arg, W_Root): - arg = input_arg - else: - try: - arg = from_ref(space, - rffi.cast(PyObject, input_arg)) - except TypeError, e: - err = OperationError(space.w_TypeError, - space.wrap( - "could not cast arg to PyObject")) - if not catch_exception: - raise err - state = space.fromcache(State) - state.set_exception(err) - if is_PyObject(restype): - return None - else: - return api_function.error_value + + ## ZZZ: for is_pyobj: + ## try: + ## arg = from_ref(space, + ## rffi.cast(PyObject, input_arg)) + ## except TypeError, e: + ## err = OperationError(space.w_TypeError, + ## space.wrap( + ## "could not cast arg to PyObject")) + ## if not catch_exception: + ## raise err + ## state = space.fromcache(State) + ## state.set_exception(err) + ## if is_PyObject(restype): + ## return None + ## else: + ## return api_function.error_value else: - # convert to a wrapped object + # arg is not declared as PyObject, no magic arg = input_arg newargs += (arg, ) - try: + if not catch_exception: + try: + res = func(space, *newargs) + finally: + keepalive_until_here(*keepalives) + else: + # non-rpython variant + assert not we_are_translated() try: res = func(space, *newargs) except OperationError, e: - if not catch_exception: - raise if not hasattr(api_function, "error_value"): raise state = space.fromcache(State) @@ -343,21 +350,13 @@ return None else: return api_function.error_value - if not we_are_translated(): - got_integer = isinstance(res, (int, long, float)) - assert got_integer == expect_integer,'got %r not integer' % res - if res is None: - return None - elif isinstance(res, Reference): - return res.get_wrapped(space) - else: - return res - finally: - for arg in to_decref: - Py_DecRef(space, arg) + # 'keepalives' is alive here (it's not rpython) + got_integer = isinstance(res, (int, long, float)) + assert got_integer == expect_integer, ( + 'got %r not integer' % (res,)) + return res unwrapper.func = func unwrapper.api_func = api_function - unwrapper._always_inline_ = 'try' return unwrapper unwrapper_catch = make_unwrapper(True) @@ -625,6 +624,7 @@ llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value boxed_args = () + tb = None try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, @@ -632,10 +632,8 @@ for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: - if arg: - arg_conv = from_ref(space, rffi.cast(PyObject, arg)) - else: - arg_conv = None + assert is_pyobj(arg) + arg_conv = from_pyobj(space, arg) else: arg_conv = arg boxed_args += (arg_conv, ) @@ -650,6 +648,7 @@ except BaseException, e: failed = True if not we_are_translated(): + tb = sys.exc_info()[2] message = repr(e) import traceback traceback.print_exc() @@ -668,29 +667,34 @@ retval = error_value elif is_PyObject(callable.api_func.restype): - if result is None: - retval = rffi.cast(callable.api_func.restype, - make_ref(space, None)) - elif isinstance(result, Reference): - retval = result.get_ref(space) - elif not rffi._isllptr(result): - retval = rffi.cast(callable.api_func.restype, - make_ref(space, result)) + if is_pyobj(result): + retval = result else: - retval = result + if result is None: + if callable.api_func.result_borrowed: + retval = as_pyobj(space, result) + else: + retval = get_pyobj_and_incref(space, result) + retval = rffi.cast(callable.api_func.restype, retval) + else: + retval = lltype.nullptr(PyObject.TO) elif callable.api_func.restype is not lltype.Void: retval = rffi.cast(callable.api_func.restype, result) except Exception, e: print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__ print 'Either report a bug or consider not using this particular extension' if not we_are_translated(): + if tb is None: + tb = sys.exc_info()[2] import traceback traceback.print_exc() - print str(e) + if sys.stdout == sys.__stdout__: + import pdb; pdb.post_mortem(tb) # we can't do much here, since we're in ctypes, swallow else: print str(e) pypy_debug_catch_fatal_exception() + assert False rffi.stackcounter.stacks_counter -= 1 if gil_release: rgil.release() @@ -843,7 +847,7 @@ # populate static data for name, (typ, expr) in GLOBALS.iteritems(): - from pypy.module import cpyext + from pypy.module import cpyext # for the eval() below w_obj = eval(expr) if name.endswith('#'): name = name[:-1] @@ -1204,20 +1208,15 @@ @specialize.ll() def generic_cpy_call(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, False)(space, func, *args) - - at specialize.ll() -def generic_cpy_call_dont_decref(space, func, *args): - FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, False, False)(space, func, *args) + return make_generic_cpy_call(FT, False)(space, func, *args) @specialize.ll() def generic_cpy_call_expect_null(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, True)(space, func, *args) + return make_generic_cpy_call(FT, True)(space, func, *args) @specialize.memo() -def make_generic_cpy_call(FT, decref_args, expect_null): +def make_generic_cpy_call(FT, expect_null): from pypy.module.cpyext.pyobject import make_ref, from_ref, Py_DecRef from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.pyerrors import PyErr_Occurred @@ -1247,65 +1246,49 @@ @specialize.ll() def generic_cpy_call(space, func, *args): boxed_args = () - to_decref = [] + keepalives = () assert len(args) == len(FT.ARGS) for i, ARG in unrolling_arg_types: arg = args[i] if is_PyObject(ARG): - if arg is None: - boxed_args += (lltype.nullptr(PyObject.TO),) - elif isinstance(arg, W_Root): - ref = make_ref(space, arg) - boxed_args += (ref,) - if decref_args: - to_decref.append(ref) - else: - boxed_args += (arg,) - else: - boxed_args += (arg,) + if not is_pyobj(arg): + keepalives += (arg,) + arg = as_pyobj(space, arg) + boxed_args += (arg,) try: - # create a new container for borrowed references - state = space.fromcache(RefcountState) - old_container = state.swap_borrow_container(None) - try: - # Call the function - result = call_external_function(func, *boxed_args) - finally: - state.swap_borrow_container(old_container) + # Call the function + result = call_external_function(func, *boxed_args) + finally: + keepalive_until_here(*keepalives) - if is_PyObject(RESULT_TYPE): - if result is None: - ret = result - elif isinstance(result, W_Root): - ret = result + if is_PyObject(RESULT_TYPE): + if not is_pyobj(result): + ret = result + else: + # The object reference returned from a C function + # that is called from Python must be an owned reference + # - ownership is transferred from the function to its caller. + if result: + ret = get_w_obj_and_decref(space, result) else: - ret = from_ref(space, result) - # The object reference returned from a C function - # that is called from Python must be an owned reference - # - ownership is transferred from the function to its caller. - if result: - Py_DecRef(space, result) + ret = None - # Check for exception consistency - has_error = PyErr_Occurred(space) is not None - has_result = ret is not None - if has_error and has_result: - raise OperationError(space.w_SystemError, space.wrap( - "An exception was set, but function returned a value")) - elif not expect_null and not has_error and not has_result: - raise OperationError(space.w_SystemError, space.wrap( - "Function returned a NULL result without setting an exception")) + # Check for exception consistency + has_error = PyErr_Occurred(space) is not None + has_result = ret is not None + if has_error and has_result: + raise OperationError(space.w_SystemError, space.wrap( + "An exception was set, but function returned a value")) + elif not expect_null and not has_error and not has_result: + raise OperationError(space.w_SystemError, space.wrap( + "Function returned a NULL result without setting an exception")) - if has_error: - state = space.fromcache(State) - state.check_and_raise_exception() + if has_error: + state = space.fromcache(State) + state.check_and_raise_exception() - return ret - return result - finally: - if decref_args: - for ref in to_decref: - Py_DecRef(space, ref) + return ret + return result + return generic_cpy_call - diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -150,6 +150,7 @@ Allocates a PyObject, and fills its fields with info from the given intepreter object. """ + GOES_AWAY state = space.fromcache(RefcountState) w_type = space.type(w_obj) if w_type.is_cpytype(): @@ -169,6 +170,7 @@ """ Ties together a PyObject and an interpreter object. """ + GOES_AWAY # XXX looks like a PyObject_GC_TRACK ptr = rffi.cast(ADDR, py_obj) state = space.fromcache(RefcountState) @@ -185,6 +187,7 @@ """ Returns a new reference to an intepreter object. """ + GOES_AWAY if w_obj is None: return lltype.nullptr(PyObject.TO) assert isinstance(w_obj, W_Root) @@ -204,6 +207,7 @@ Finds the interpreter object corresponding to the given reference. If the object is not yet realized (see stringobject.py), creates it. """ + GOES_AWAY assert lltype.typeOf(ref) == PyObject if not ref: return None @@ -228,6 +232,7 @@ # XXX Optimize these functions and put them into macro definitions @cpython_api([PyObject], lltype.Void) def Py_DecRef(space, obj): + ZZZ if not obj: return assert lltype.typeOf(obj) == PyObject @@ -284,3 +289,11 @@ @cpython_api([rffi.VOIDP], lltype.Signed, error=CANNOT_FAIL) def _Py_HashPointer(space, ptr): return rffi.cast(lltype.Signed, ptr) + + +class RefcountState: + def __init__(self, *args): + GOES_AWAY + +def borrow_from(container, borrowed): + GOES_AWAY From pypy.commits at gmail.com Tue Jan 26 13:11:19 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 10:11:19 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: more copying Message-ID: <56a7b6c7.cf0b1c0a.4d6ce.719b@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81952:a6a694f2a019 Date: 2016-01-26 17:51 +0100 http://bitbucket.org/pypy/pypy/changeset/a6a694f2a019/ Log: more copying diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -17,7 +17,8 @@ #define staticforward static #define PyObject_HEAD \ - long ob_refcnt; \ + Py_ssize_t ob_refcnt; \ + Py_ssize_t ob_pypy_link; \ struct _typeobject *ob_type; #define PyObject_VAR_HEAD \ @@ -25,7 +26,7 @@ Py_ssize_t ob_size; /* Number of items in variable part */ #define PyObject_HEAD_INIT(type) \ - 1, type, + 1, 0, type, #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, @@ -40,19 +41,19 @@ #ifdef PYPY_DEBUG_REFCOUNT /* Slow version, but useful for debugging */ -#define Py_INCREF(ob) (Py_IncRef((PyObject *)ob)) -#define Py_DECREF(ob) (Py_DecRef((PyObject *)ob)) -#define Py_XINCREF(ob) (Py_IncRef((PyObject *)ob)) -#define Py_XDECREF(ob) (Py_DecRef((PyObject *)ob)) +#define Py_INCREF(ob) (Py_IncRef((PyObject *)(ob))) +#define Py_DECREF(ob) (Py_DecRef((PyObject *)(ob))) +#define Py_XINCREF(ob) (Py_IncRef((PyObject *)(ob))) +#define Py_XDECREF(ob) (Py_DecRef((PyObject *)(ob))) #else /* Fast version */ -#define Py_INCREF(ob) (((PyObject *)ob)->ob_refcnt++) -#define Py_DECREF(ob) \ +#define Py_INCREF(ob) (((PyObject *)(ob))->ob_refcnt++) +#define Py_DECREF(op) \ do { \ - if (((PyObject *)ob)->ob_refcnt > 1) \ - ((PyObject *)ob)->ob_refcnt--; \ + if (--((PyObject *)(op))->ob_refcnt != 0) \ + ; \ else \ - Py_DecRef((PyObject *)ob); \ + _Py_Dealloc((PyObject *)(op)); \ } while (0) #define Py_XINCREF(op) do { if ((op) == NULL) ; else Py_INCREF(op); } while (0) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -134,6 +134,9 @@ #________________________________________________________ # refcounted object support +class InvalidPointerException(Exception): + pass + DEBUG_REFCOUNT = False def debug_refcount(*args, **kwargs): @@ -229,48 +232,13 @@ return get_typedescr(w_type.instancetypedef).realize(space, ref) -# XXX Optimize these functions and put them into macro definitions + at cpython_api([PyObject], lltype.Void) +def Py_IncRef(space, obj): + incref(obj) + @cpython_api([PyObject], lltype.Void) def Py_DecRef(space, obj): - ZZZ - if not obj: - return - assert lltype.typeOf(obj) == PyObject - - obj.c_ob_refcnt -= 1 - if DEBUG_REFCOUNT: - debug_refcount("DECREF", obj, obj.c_ob_refcnt, frame_stackdepth=3) - if obj.c_ob_refcnt == 0: - state = space.fromcache(RefcountState) - ptr = rffi.cast(ADDR, obj) - if ptr not in state.py_objects_r2w: - # this is a half-allocated object, lets call the deallocator - # without modifying the r2w/w2r dicts - _Py_Dealloc(space, obj) - else: - w_obj = state.py_objects_r2w[ptr] - del state.py_objects_r2w[ptr] - w_type = space.type(w_obj) - if not w_type.is_cpytype(): - _Py_Dealloc(space, obj) - del state.py_objects_w2r[w_obj] - # if the object was a container for borrowed references - state.delete_borrower(w_obj) - else: - if not we_are_translated() and obj.c_ob_refcnt < 0: - message = "Negative refcount for obj %s with type %s" % ( - obj, rffi.charp2str(obj.c_ob_type.c_tp_name)) - print >>sys.stderr, message - assert False, message - - at cpython_api([PyObject], lltype.Void) -def Py_IncRef(space, obj): - if not obj: - return - obj.c_ob_refcnt += 1 - assert obj.c_ob_refcnt > 0 - if DEBUG_REFCOUNT: - debug_refcount("INCREF", obj, obj.c_ob_refcnt, frame_stackdepth=3) + decref(space, obj) @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): @@ -279,6 +247,7 @@ assert isinstance(w_type, W_TypeObject) get_typedescr(w_type.instancetypedef).realize(space, obj) + at cpython_api([PyObject], lltype.Void) def _Py_Dealloc(space, obj): from pypy.module.cpyext.api import generic_cpy_call_dont_decref pto = obj.c_ob_type From pypy.commits at gmail.com Tue Jan 26 13:11:21 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 10:11:21 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: in-progress Message-ID: <56a7b6c9.4c301c0a.ac10c.6936@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81953:3d1f9e7d01e1 Date: 2016-01-26 19:10 +0100 http://bitbucket.org/pypy/pypy/changeset/3d1f9e7d01e1/ Log: in-progress diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -2,9 +2,11 @@ from pypy.interpreter.baseobjspace import W_Root, SpaceCache from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.extregistry import ExtRegistryEntry from pypy.module.cpyext.api import ( cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, - CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr) + CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, + INTERPLEVEL_API) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objectobject import W_ObjectObject @@ -151,10 +153,9 @@ def create_ref(space, w_obj, itemcount=0): """ Allocates a PyObject, and fills its fields with info from the given - intepreter object. + interpreter object. """ - GOES_AWAY - state = space.fromcache(RefcountState) + #state = space.fromcache(RefcountState) w_type = space.type(w_obj) if w_type.is_cpytype(): py_obj = state.get_from_lifeline(w_obj) @@ -173,18 +174,16 @@ """ Ties together a PyObject and an interpreter object. """ - GOES_AWAY # XXX looks like a PyObject_GC_TRACK - ptr = rffi.cast(ADDR, py_obj) - state = space.fromcache(RefcountState) + assert py_obj.c_ob_refcnt < rawrefcount.REFCNT_FROM_PYPY + py_obj.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY if DEBUG_REFCOUNT: debug_refcount("MAKREF", py_obj, w_obj) + assert w_obj + assert py_obj if not replace: assert w_obj not in state.py_objects_w2r - assert ptr not in state.py_objects_r2w - state.py_objects_w2r[w_obj] = py_obj - if ptr: # init_typeobject() bootstraps with NULL references - state.py_objects_r2w[ptr] = w_obj + rawrefcount.create_link_pypy(py_obj, w_obj) def make_ref(space, w_obj): """ @@ -232,6 +231,124 @@ return get_typedescr(w_type.instancetypedef).realize(space, ref) +def debug_collect(): + rawrefcount._collect(track_allocation=False) + + +def as_pyobj(space, w_obj): + """ + Returns a 'PyObject *' representing the given intepreter object. + This doesn't give a new reference, but the returned 'PyObject *' + is valid at least as long as 'w_obj' is. To be safe, you should + use keepalive_until_here(w_obj) some time later. + + NOTE: get_pyobj_and_incref() is safer. + """ + if w_obj is not None: + assert not is_pyobj(w_obj) + return XXXXXXXXXXX + else: + return lltype.nullptr(PyObject.TO) +as_pyobj._always_inline_ = 'try' +INTERPLEVEL_API['as_pyobj'] = as_pyobj + +def pyobj_has_w_obj(pyobj): + return rawrefcount.to_obj(W_Root, pyobj) is not None +INTERPLEVEL_API['pyobj_has_w_obj'] = staticmethod(pyobj_has_w_obj) + + at specialize.ll() +def from_pyobj(space, pyobj): + assert is_pyobj(pyobj) + if pyobj: + pyobj = rffi.cast(PyObject, pyobj) + w_obj = rawrefcount.to_obj(W_Root, pyobj) + if w_obj is None: + XXXXXXXXXXX + return w_obj + else: + return None +from_pyobj._always_inline_ = 'try' +INTERPLEVEL_API['from_pyobj'] = from_pyobj + + +def is_pyobj(x): + if x is None or isinstance(x, W_Root): + return False + elif is_PyObject(lltype.typeOf(x)): + return True + else: + raise TypeError(repr(type(x))) +INTERPLEVEL_API['is_pyobj'] = staticmethod(is_pyobj) + +class Entry(ExtRegistryEntry): + _about_ = is_pyobj + def compute_result_annotation(self, s_x): + from rpython.rtyper.llannotation import SomePtr + return self.bookkeeper.immutablevalue(isinstance(s_x, SomePtr)) + def specialize_call(self, hop): + hop.exception_cannot_occur() + return hop.inputconst(lltype.Bool, hop.s_result.const) + + at specialize.ll() +def get_pyobj_and_incref(space, obj): + """Increment the reference counter of the PyObject and return it. + Can be called with either a PyObject or a W_Root. + """ + if obj: + if is_pyobj(obj): + pyobj = rffi.cast(PyObject, obj) + else: + pyobj = as_pyobj(space, obj) + assert pyobj.c_ob_refcnt > 0 + pyobj.c_ob_refcnt += 1 + if not is_pyobj(obj): + keepalive_until_here(obj) + return pyobj + else: + return lltype.nullptr(PyObject.TO) +INTERPLEVEL_API['get_pyobj_and_incref'] = get_pyobj_and_incref + + + at specialize.ll() +def get_w_obj_and_decref(space, obj): + """Decrement the reference counter of the PyObject and return the + corresponding W_Root object (so the reference count is at least + REFCNT_FROM_PYPY and cannot be zero). Can be called with either + a PyObject or a W_Root. + """ + if is_pyobj(obj): + pyobj = rffi.cast(PyObject, obj) + w_obj = from_pyobj(space, pyobj) + else: + w_obj = obj + pyobj = as_pyobj(space, w_obj) + if pyobj: + pyobj.c_ob_refcnt -= 1 + assert pyobj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY + keepalive_until_here(w_obj) + return w_obj +INTERPLEVEL_API['get_w_obj_and_decref'] = get_w_obj_and_decref + + + at specialize.ll() +def incref(space, obj): + get_pyobj_and_incref(space, obj) +INTERPLEVEL_API['incref'] = incref + + at specialize.ll() +def decref(space, obj): + if is_pyobj(obj): + obj = rffi.cast(PyObject, obj) + if obj: + assert obj.c_ob_refcnt > 0 + obj.c_ob_refcnt -= 1 + if obj.c_ob_refcnt == 0: + _Py_Dealloc(space, obj) + else: + get_w_obj_and_decref(space, obj) +INTERPLEVEL_API['decref'] = decref + + @cpython_api([PyObject], lltype.Void) def Py_IncRef(space, obj): incref(obj) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -21,7 +21,8 @@ from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, - track_reference, RefcountState, borrow_from, Py_DecRef) + track_reference, RefcountState, borrow_from, Py_DecRef, + get_pyobj_and_incref) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State @@ -320,13 +321,13 @@ # - object.tp_bases is a tuple # - tuple.tp_bases is a tuple - # insert null placeholders to please create_ref() - track_reference(space, lltype.nullptr(PyObject.TO), space.w_type) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_object) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_tuple) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_str) - - # create the objects + # we create the types manually here + py_type = _type_alloc(space, lltype.nullptr(PyTypeObject)) + py_object = _type_alloc(space, lltype.nullptr(PyTypeObject)) + py_tuple = _type_alloc(space, lltype.nullptr(PyTypeObject)) + py_str = _type_alloc(space, lltype.nullptr(PyTypeObject)) + ... + py_type = create_ref(space, space.w_type) py_object = create_ref(space, space.w_object) py_tuple = create_ref(space, space.w_tuple) @@ -459,13 +460,16 @@ def type_alloc(space, w_metatype): - metatype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_metatype)) + metatype = get_pyobj_and_incref(space, w_metatype) + metatype = rffi.cast(PyTypeObjectPtr, metatype) + assert metatype # Don't increase refcount for non-heaptypes - if metatype: - flags = rffi.cast(lltype.Signed, metatype.c_tp_flags) - if not flags & Py_TPFLAGS_HEAPTYPE: - Py_DecRef(space, w_metatype) + flags = rffi.cast(lltype.Signed, metatype.c_tp_flags) + if not flags & Py_TPFLAGS_HEAPTYPE: + Py_DecRef(space, w_metatype) + return _type_alloc(space, metatype) +def _type_alloc(space, metatype): heaptype = lltype.malloc(PyHeapTypeObject.TO, flavor='raw', zero=True) pto = heaptype.c_ht_type From pypy.commits at gmail.com Tue Jan 26 15:39:29 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 26 Jan 2016 12:39:29 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Close branch exctrans Message-ID: <56a7d981.d69c1c0a.90d26.ffffa6c3@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81954:e953e5f78446 Date: 2016-01-26 20:39 +0000 http://bitbucket.org/pypy/pypy/changeset/e953e5f78446/ Log: Close branch exctrans From pypy.commits at gmail.com Tue Jan 26 15:39:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 26 Jan 2016 12:39:44 -0800 (PST) Subject: [pypy-commit] pypy default: Merged in exctrans (pull request #390) Message-ID: <56a7d990.c177c20a.46fc9.3c56@mx.google.com> Author: Ronan Lamy Branch: Changeset: r81955:467f4a616ad1 Date: 2016-01-26 20:39 +0000 http://bitbucket.org/pypy/pypy/changeset/467f4a616ad1/ Log: Merged in exctrans (pull request #390) Some refactoring of databasing diff --git a/rpython/memory/gctransform/boehm.py b/rpython/memory/gctransform/boehm.py --- a/rpython/memory/gctransform/boehm.py +++ b/rpython/memory/gctransform/boehm.py @@ -74,7 +74,7 @@ def gct_fv_gc_malloc_varsize(self, hop, flags, TYPE, v_length, c_const_size, c_item_size, c_offset_to_length): - # XXX same behavior for zero=True: in theory that's wrong + # XXX same behavior for zero=True: in theory that's wrong if c_offset_to_length is None: v_raw = hop.genop("direct_call", [self.malloc_varsize_no_length_ptr, v_length, @@ -156,6 +156,11 @@ resulttype = lltype.Signed) hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result) + def gcheader_initdata(self, defnode): + hdr = lltype.malloc(self.HDR, immortal=True) + hdr.hash = lltype.identityhash_nocache(defnode.obj._as_ptr()) + return hdr._obj + ########## weakrefs ########## # Boehm: weakref objects are small structures containing only a Boehm diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -288,7 +288,6 @@ s_gcref = SomePtr(llmemory.GCREF) gcdata = self.gcdata - translator = self.translator #use the GC flag to find which malloc method to use #malloc_zero_filled == Ture -> malloc_fixedsize/varsize_clear #malloc_zero_filled == Flase -> malloc_fixedsize/varsize @@ -322,7 +321,7 @@ GCClass.malloc_varsize.im_func, [s_gc, s_typeid16] + [annmodel.SomeInteger(nonneg=True) for i in range(4)], s_gcref) - + self.collect_ptr = getfn(GCClass.collect.im_func, [s_gc, annmodel.SomeInteger()], annmodel.s_None) self.can_move_ptr = getfn(GCClass.can_move.im_func, @@ -1385,7 +1384,7 @@ [v] + previous_steps + [c_name, c_null]) else: llops.genop('bare_setfield', [v, c_name, c_null]) - + return elif isinstance(TYPE, lltype.Array): ITEM = TYPE.OF @@ -1412,6 +1411,25 @@ resulttype=llmemory.Address) llops.genop('raw_memclear', [v_adr, v_totalsize]) + def gcheader_initdata(self, defnode): + o = lltype.top_container(defnode.obj) + needs_hash = self.get_prebuilt_hash(o) is not None + hdr = self.gc_header_for(o, needs_hash) + return hdr._obj + + def get_prebuilt_hash(self, obj): + # for prebuilt objects that need to have their hash stored and + # restored. Note that only structures that are StructNodes all + # the way have their hash stored (and not e.g. structs with var- + # sized arrays at the end). 'obj' must be the top_container. + TYPE = lltype.typeOf(obj) + if not isinstance(TYPE, lltype.GcStruct): + return None + if TYPE._is_varsize(): + return None + return getattr(obj, '_hash_cache_', None) + + class TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder): diff --git a/rpython/memory/gctransform/refcounting.py b/rpython/memory/gctransform/refcounting.py --- a/rpython/memory/gctransform/refcounting.py +++ b/rpython/memory/gctransform/refcounting.py @@ -285,3 +285,7 @@ resulttype=llmemory.Address) hop.genop("direct_call", [self.identityhash_ptr, v_adr], resultvar=hop.spaceop.result) + + def gcheader_initdata(self, defnode): + top = lltype.top_container(defnode.obj) + return self.gcheaderbuilder.header_of_object(top)._obj diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -40,7 +40,7 @@ t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs_for_llinterp() + db = cbuild.build_database() entrypointptr = cbuild.getentrypointptr() entrygraph = entrypointptr._obj.graph @@ -69,7 +69,7 @@ return -x t = rtype(g, [int]) gg = graphof(t, g) - assert not CollectAnalyzer(t).analyze_direct_call(gg) + assert not CollectAnalyzer(t).analyze_direct_call(gg) def test_cancollect_external(): fext1 = rffi.llexternal('fext1', [], lltype.Void, releasegil=False) @@ -110,12 +110,12 @@ def entrypoint(argv): return g() + 2 - + t = rtype(entrypoint, [s_list_of_strings]) t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs_for_llinterp() + db = cbuild.build_database() def test_no_collect_detection(): from rpython.rlib import rgc @@ -134,12 +134,13 @@ def entrypoint(argv): return g() + 2 - + t = rtype(entrypoint, [s_list_of_strings]) t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - f = py.test.raises(Exception, cbuild.generate_graphs_for_llinterp) + with py.test.raises(Exception) as f: + cbuild.build_database() expected = "'no_collect' function can trigger collection: 0: size = rffi.cast(rffi.SIZE_T, length) @@ -85,15 +85,24 @@ return loader def reraise_error(error, loader): - if error == 1: raise OSError(load_int(loader), "external error") - elif error == 2: raise IOError - elif error == 3: raise OverflowError - elif error == 4: raise ValueError - elif error == 5: raise ZeroDivisionError - elif error == 6: raise MemoryError - elif error == 7: raise KeyError - elif error == 8: raise IndexError - else: raise RuntimeError + if error == 1: + raise OSError(load_int(loader), "external error") + elif error == 2: + raise IOError + elif error == 3: + raise OverflowError + elif error == 4: + raise ValueError + elif error == 5: + raise ZeroDivisionError + elif error == 6: + raise MemoryError + elif error == 7: + raise KeyError + elif error == 8: + raise IndexError + else: + raise RuntimeError @signature(types.str(), returns=types.impossible()) @@ -101,51 +110,46 @@ STDERR = 2 with rffi.scoped_str2charp(msg + '\n') as buf: writeall_not_sandboxed(STDERR, buf, len(msg) + 1) - raise RuntimeError(msg) # XXX in RPython, the msg is ignored at the moment + raise RuntimeError(msg) # XXX in RPython, the msg is ignored + +def make_stub(fnname, msg): + """Build always-raising stub function to replace unsupported external.""" + log.WARNING(msg) + + def execute(*args): + not_implemented_stub(msg) + execute.__name__ = 'sandboxed_%s' % (fnname,) + return execute + +def sig_ll(fnobj): + FUNCTYPE = lltype.typeOf(fnobj) + args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] + s_result = lltype_to_annotation(FUNCTYPE.RESULT) + return args_s, s_result dump_string = rmarshal.get_marshaller(str) -load_int = rmarshal.get_loader(int) +load_int = rmarshal.get_loader(int) -def get_external_function_sandbox_graph(fnobj, db, force_stub=False): - """Build the graph of a helper trampoline function to be used - in place of real calls to the external function 'fnobj'. The - trampoline marshals its input arguments, dumps them to STDOUT, - and waits for an answer on STDIN. +def get_sandbox_stub(fnobj, rtyper): + fnname = fnobj._name + args_s, s_result = sig_ll(fnobj) + msg = "Not implemented: sandboxing for external function '%s'" % (fnname,) + execute = make_stub(fnname, msg) + return _annotate(rtyper, execute, args_s, s_result) + +def make_sandbox_trampoline(fnname, args_s, s_result): + """Create a trampoline function with the specified signature. + + The trampoline is meant to be used in place of real calls to the external + function named 'fnname'. It marshals its input arguments, dumps them to + STDOUT, and waits for an answer on STDIN. """ - if getattr(getattr(fnobj, '_callable', None), - '_sandbox_external_name', None): - fnname = fnobj._callable._sandbox_external_name - else: - fnname = fnobj._name - if hasattr(fnobj, 'graph'): - # get the annotation of the input arguments and the result - graph = fnobj.graph - annotator = db.translator.annotator - args_s = [annotator.binding(v) for v in graph.getargs()] - s_result = annotator.binding(graph.getreturnvar()) - else: - # pure external function - fall back to the annotations - # corresponding to the ll types - FUNCTYPE = lltype.typeOf(fnobj) - args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] - s_result = lltype_to_annotation(FUNCTYPE.RESULT) - try: - if force_stub: # old case - don't try to support suggested_primitive - raise NotImplementedError("sandboxing for external function '%s'" - % (fnname,)) - dump_arguments = rmarshal.get_marshaller(tuple(args_s)) load_result = rmarshal.get_loader(s_result) - - except (NotImplementedError, - rmarshal.CannotMarshal, - rmarshal.CannotUnmarshall), e: - msg = 'Not Implemented: %s' % (e,) - log.WARNING(msg) - def execute(*args): - not_implemented_stub(msg) - + except (rmarshal.CannotMarshal, rmarshal.CannotUnmarshall) as e: + msg = "Cannot sandbox function '%s': %s" % (fnname, e) + execute = make_stub(fnname, msg) else: def execute(*args): # marshal the function name and input arguments @@ -158,9 +162,12 @@ result = load_result(loader) loader.check_finished() return result - execute = func_with_new_name(execute, 'sandboxed_' + fnname) + execute.__name__ = 'sandboxed_%s' % (fnname,) + return execute - ann = MixLevelHelperAnnotator(db.translator.rtyper) - graph = ann.getgraph(execute, args_s, s_result) + +def _annotate(rtyper, f, args_s, s_result): + ann = MixLevelHelperAnnotator(rtyper) + graph = ann.getgraph(f, args_s, s_result) ann.finish() return graph From pypy.commits at gmail.com Tue Jan 26 17:36:36 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 14:36:36 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: in-progress Message-ID: <56a7f4f4.c177c20a.46fc9.5df3@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81956:20a0b177ddae Date: 2016-01-26 23:10 +0100 http://bitbucket.org/pypy/pypy/changeset/20a0b177ddae/ Log: in-progress diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,6 +37,7 @@ from py.builtin import BaseException from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib import rawrefcount DEBUG_WRAPPER = True @@ -825,14 +826,6 @@ outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) - run_bootstrap_functions(space) - - # load the bridge, and init structure - import ctypes - bridge = ctypes.CDLL(str(modulename), mode=ctypes.RTLD_GLOBAL) - - space.fromcache(State).install_dll(eci) - def dealloc_trigger(): print 'dealloc_trigger...' while True: @@ -845,6 +838,14 @@ return "RETRY" rawrefcount.init(dealloc_trigger) + run_bootstrap_functions(space) + + # load the bridge, and init structure + import ctypes + bridge = ctypes.CDLL(str(modulename), mode=ctypes.RTLD_GLOBAL) + + space.fromcache(State).install_dll(eci) + # populate static data for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext # for the eval() below diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -13,6 +13,8 @@ from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.rweakref import RWeakKeyDictionary from rpython.rtyper.annlowlevel import llhelper +from rpython.rlib import rawrefcount + #________________________________________________________ # type description @@ -31,12 +33,13 @@ # similar to PyType_GenericAlloc? # except that it's not related to any pypy object. - pytype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_type)) + pytype = get_pyobj_and_incref(space, w_type) + pytype = rffi.cast(PyTypeObjectPtr, pytype) + assert pytype # Don't increase refcount for non-heaptypes - if pytype: - flags = rffi.cast(lltype.Signed, pytype.c_tp_flags) - if not flags & Py_TPFLAGS_HEAPTYPE: - Py_DecRef(space, w_type) + flags = rffi.cast(lltype.Signed, pytype.c_tp_flags) + if not flags & Py_TPFLAGS_HEAPTYPE: + Py_DecRef(space, w_type) if pytype: size = pytype.c_tp_basicsize @@ -158,19 +161,18 @@ #state = space.fromcache(RefcountState) w_type = space.type(w_obj) if w_type.is_cpytype(): - py_obj = state.get_from_lifeline(w_obj) + ZZZ # py_obj = state.get_from_lifeline(w_obj) if py_obj: Py_IncRef(space, py_obj) return py_obj typedescr = get_typedescr(w_obj.typedef) py_obj = typedescr.allocate(space, w_type, itemcount=itemcount) - if w_type.is_cpytype(): - state.set_lifeline(w_obj, py_obj) + track_reference(space, py_obj, w_obj) typedescr.attach(space, py_obj, w_obj) return py_obj -def track_reference(space, py_obj, w_obj, replace=False): +def track_reference(space, py_obj, w_obj): """ Ties together a PyObject and an interpreter object. """ @@ -181,8 +183,6 @@ debug_refcount("MAKREF", py_obj, w_obj) assert w_obj assert py_obj - if not replace: - assert w_obj not in state.py_objects_w2r rawrefcount.create_link_pypy(py_obj, w_obj) def make_ref(space, w_obj): @@ -239,14 +239,17 @@ """ Returns a 'PyObject *' representing the given intepreter object. This doesn't give a new reference, but the returned 'PyObject *' - is valid at least as long as 'w_obj' is. To be safe, you should - use keepalive_until_here(w_obj) some time later. - - NOTE: get_pyobj_and_incref() is safer. + is valid at least as long as 'w_obj' is. **To be safe, you should + use keepalive_until_here(w_obj) some time later.** In case of + doubt, use the safer get_pyobj_and_incref(). """ if w_obj is not None: assert not is_pyobj(w_obj) - return XXXXXXXXXXX + py_obj = rawrefcount.from_obj(PyObject, w_obj) + if not py_obj: + py_obj = create_ref(space, w_obj) + #track_reference(space, py_obj, w_obj) -- included with create_ref() + return py_obj else: return lltype.nullptr(PyObject.TO) as_pyobj._always_inline_ = 'try' diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -311,54 +311,12 @@ realize=type_realize, dealloc=type_dealloc) - # some types are difficult to create because of cycles. - # - object.ob_type = type - # - type.ob_type = type - # - tuple.ob_type = type - # - type.tp_base = object - # - tuple.tp_base = object - # - type.tp_bases is a tuple - # - object.tp_bases is a tuple - # - tuple.tp_bases is a tuple - - # we create the types manually here + # we create the type "type" manually here, because of the cycle + # through its 'c_ob_type' field py_type = _type_alloc(space, lltype.nullptr(PyTypeObject)) - py_object = _type_alloc(space, lltype.nullptr(PyTypeObject)) - py_tuple = _type_alloc(space, lltype.nullptr(PyTypeObject)) - py_str = _type_alloc(space, lltype.nullptr(PyTypeObject)) - ... - - py_type = create_ref(space, space.w_type) - py_object = create_ref(space, space.w_object) - py_tuple = create_ref(space, space.w_tuple) - py_str = create_ref(space, space.w_str) - # XXX py_str is not initialized here correctly, because we are - # not tracking it, it gets an empty c_ob_type from py_basestring - - # form cycles - pto_type = rffi.cast(PyTypeObjectPtr, py_type) - py_type.c_ob_type = pto_type - py_object.c_ob_type = pto_type - py_tuple.c_ob_type = pto_type - - pto_object = rffi.cast(PyTypeObjectPtr, py_object) - pto_type.c_tp_base = pto_object - pto_tuple = rffi.cast(PyTypeObjectPtr, py_tuple) - pto_tuple.c_tp_base = pto_object - - pto_type.c_tp_bases.c_ob_type = pto_tuple - pto_object.c_tp_bases.c_ob_type = pto_tuple - pto_tuple.c_tp_bases.c_ob_type = pto_tuple - - for typ in (py_type, py_object, py_tuple, py_str): - heaptype = rffi.cast(PyHeapTypeObject, typ) - heaptype.c_ht_name.c_ob_type = pto_type - - # Restore the mapping - track_reference(space, py_type, space.w_type, replace=True) - track_reference(space, py_object, space.w_object, replace=True) - track_reference(space, py_tuple, space.w_tuple, replace=True) - track_reference(space, py_str, space.w_str, replace=True) + py_type.c_ob_type = rffi.cast(PyTypeObjectPtr, py_type) + track_reference(space, py_type, space.w_type) + type_attach(space, py_type, space.w_type) @cpython_api([PyObject], lltype.Void, external=False) @@ -510,7 +468,7 @@ if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: w_typename = space.getattr(w_type, space.wrap('__name__')) heaptype = rffi.cast(PyHeapTypeObject, pto) - heaptype.c_ht_name = make_ref(space, w_typename) + heaptype.c_ht_name = get_pyobj_and_incref(space, w_typename) from pypy.module.cpyext.stringobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: From pypy.commits at gmail.com Tue Jan 26 17:36:38 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 14:36:38 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: fixes, now getting a segfault Message-ID: <56a7f4f6.c96cc20a.32d30.5847@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81957:0da7861a5b1a Date: 2016-01-26 23:35 +0100 http://bitbucket.org/pypy/pypy/changeset/0da7861a5b1a/ Log: fixes, now getting a segfault diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -9,7 +9,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -11,7 +11,7 @@ from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objectobject import W_ObjectObject from rpython.rlib.objectmodel import specialize, we_are_translated -from rpython.rlib.rweakref import RWeakKeyDictionary +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.annlowlevel import llhelper from rpython.rlib import rawrefcount @@ -183,7 +183,7 @@ debug_refcount("MAKREF", py_obj, w_obj) assert w_obj assert py_obj - rawrefcount.create_link_pypy(py_obj, w_obj) + rawrefcount.create_link_pypy(w_obj, py_obj) def make_ref(space, w_obj): """ diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -6,6 +6,7 @@ from pypy.module.cpyext.test.test_cpyext import freeze_refcnts, LeakCheckingTest PyObject = api.PyObject from pypy.interpreter.error import OperationError +from rpython.rlib import rawrefcount import os @api.cpython_api([PyObject], lltype.Void) @@ -36,6 +37,9 @@ cls.api = CAPI() CAPI.__dict__.update(api.INTERPLEVEL_API) + print 'DONT_FREE_ANY_MORE' + rawrefcount._dont_free_any_more() + def raises(self, space, api, expected_exc, f, *args): if not callable(f): raise Exception("%s is not callable" % (f,)) @@ -49,6 +53,7 @@ return state.clear_exception() def setup_method(self, func): + #return # ZZZ freeze_refcnts(self) def teardown_method(self, func): From pypy.commits at gmail.com Tue Jan 26 18:08:49 2016 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 26 Jan 2016 15:08:49 -0800 (PST) Subject: [pypy-commit] pypy py3.3: io.BufferedWriter: completely remove max_buffer_size argument. Message-ID: <56a7fc81.4f911c0a.b4082.ffffd153@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r81958:18cc712aac9b Date: 2016-01-26 22:52 +0100 http://bitbucket.org/pypy/pypy/changeset/18cc712aac9b/ Log: io.BufferedWriter: completely remove max_buffer_size argument. diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -91,10 +91,6 @@ rwbuffer.setslice(0, data) return space.wrap(len(data)) - def _complain_about_max_buffer_size(self, space): - space.warn(space.wrap("max_buffer_size is deprecated"), - space.w_DeprecationWarning) - W_BufferedIOBase.typedef = TypeDef( '_io._BufferedIOBase', W_IOBase.typedef, __new__ = generic_new_descr(W_BufferedIOBase), @@ -888,11 +884,8 @@ ) class W_BufferedWriter(BufferedMixin, W_BufferedIOBase): - @unwrap_spec(buffer_size=int, max_buffer_size=int) - def descr_init(self, space, w_raw, buffer_size=DEFAULT_BUFFER_SIZE, - max_buffer_size=-234): - if max_buffer_size != -234: - self._complain_about_max_buffer_size(space) + @unwrap_spec(buffer_size=int) + def descr_init(self, space, w_raw, buffer_size=DEFAULT_BUFFER_SIZE): self.state = STATE_ZERO check_writable_w(space, w_raw) @@ -954,12 +947,10 @@ w_reader = None w_writer = None - @unwrap_spec(buffer_size=int, max_buffer_size=int) + @unwrap_spec(buffer_size=int) def descr_init(self, space, w_reader, w_writer, - buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=-234): + buffer_size=DEFAULT_BUFFER_SIZE): try: - if max_buffer_size != -234: - self._complain_about_max_buffer_size(space) self.w_reader = W_BufferedReader(space) self.w_reader.descr_init(space, w_reader, buffer_size) self.w_writer = W_BufferedWriter(space) @@ -1029,12 +1020,8 @@ ) class W_BufferedRandom(BufferedMixin, W_BufferedIOBase): - @unwrap_spec(buffer_size=int, max_buffer_size=int) - def descr_init(self, space, w_raw, buffer_size=DEFAULT_BUFFER_SIZE, - max_buffer_size=-234): - if max_buffer_size != -234: - self._complain_about_max_buffer_size(space) - + @unwrap_spec(buffer_size=int) + def descr_init(self, space, w_raw, buffer_size=DEFAULT_BUFFER_SIZE): self.state = STATE_ZERO check_readable_w(space, w_raw) check_writable_w(space, w_raw) diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -749,28 +749,17 @@ raises(TypeError, f.readline, 5.3) -class AppTestDeprecation: +class AppTestMaxBuffer: - def w_check_max_buffer_size_deprecation(self, test): + def w_check_max_buffer_size_removal(self, test): import _io - import _warnings - def simplefilter(action, category): - _warnings.filters.insert(0, (action, None, category, None, 0)) - simplefilter('error', DeprecationWarning) - try: - test(_io.BytesIO(), 8, 12) - except DeprecationWarning as e: - assert 'max_buffer_size is deprecated' in str(e) - else: - assert False, 'Expected DeprecationWarning' - finally: - simplefilter('default', DeprecationWarning) + raises(TypeError, test, _io.BytesIO(), 8, 12) - def test_max_buffer_size_deprecation(self): + def test_max_buffer_size_removal(self): import _io - self.check_max_buffer_size_deprecation(_io.BufferedWriter) - self.check_max_buffer_size_deprecation(_io.BufferedRandom) - self.check_max_buffer_size_deprecation( + self.check_max_buffer_size_removal(_io.BufferedWriter) + self.check_max_buffer_size_removal(_io.BufferedRandom) + self.check_max_buffer_size_removal ( lambda raw, *args: _io.BufferedRWPair(raw, raw, *args)) From pypy.commits at gmail.com Tue Jan 26 18:10:07 2016 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 26 Jan 2016 15:10:07 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Relax a test: with pypy we cannot choose the name of the function when argument parsing fails. Message-ID: <56a7fccf.8f7e1c0a.e19ba.ffffd2a7@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r81959:43daeebf0ab1 Date: 2016-01-26 22:44 +0100 http://bitbucket.org/pypy/pypy/changeset/43daeebf0ab1/ Log: Relax a test: with pypy we cannot choose the name of the function when argument parsing fails. diff --git a/lib-python/3/test/test_io.py b/lib-python/3/test/test_io.py --- a/lib-python/3/test/test_io.py +++ b/lib-python/3/test/test_io.py @@ -1079,7 +1079,7 @@ def test_args_error(self): # Issue #17275 - with self.assertRaisesRegex(TypeError, "BufferedReader"): + with self.assertRaisesRegex(TypeError, "BufferedReader|__init__"): self.tp(io.BytesIO(), 1024, 1024, 1024) @@ -1386,7 +1386,7 @@ def test_args_error(self): # Issue #17275 - with self.assertRaisesRegex(TypeError, "BufferedWriter"): + with self.assertRaisesRegex(TypeError, "BufferedWriter|__init__"): self.tp(io.BytesIO(), 1024, 1024, 1024) @@ -1779,7 +1779,7 @@ def test_args_error(self): # Issue #17275 - with self.assertRaisesRegex(TypeError, "BufferedRandom"): + with self.assertRaisesRegex(TypeError, "BufferedRandom|__init__"): self.tp(io.BytesIO(), 1024, 1024, 1024) From pypy.commits at gmail.com Tue Jan 26 18:10:09 2016 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 26 Jan 2016 15:10:09 -0800 (PST) Subject: [pypy-commit] pypy py3.3: TextIOWrapper: Ensure the constructor complains if passed a codec that isn't Message-ID: <56a7fcd1.a3f6c20a.2ed55.5f8e@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r81960:5ddd2d347061 Date: 2016-01-27 00:03 +0100 http://bitbucket.org/pypy/pypy/changeset/5ddd2d347061/ Log: TextIOWrapper: Ensure the constructor complains if passed a codec that isn't marked as a text encoding. diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -413,10 +413,16 @@ else: self.writenl = None + w_codec = interp_codecs.lookup_codec(space, + space.str_w(self.w_encoding)) + if not space.is_true(space.getattr(w_codec, + space.wrap('_is_text_encoding'))): + msg = ("%R is not a text encoding; " + "use codecs.open() to handle arbitrary codecs") + raise oefmt(space.w_LookupError, msg, self.w_encoding) + # build the decoder object if space.is_true(space.call_method(w_buffer, "readable")): - w_codec = interp_codecs.lookup_codec(space, - space.str_w(self.w_encoding)) self.w_decoder = space.call_method(w_codec, "incrementaldecoder", w_errors) if self.readuniversal: @@ -426,8 +432,6 @@ # build the encoder object if space.is_true(space.call_method(w_buffer, "writable")): - w_codec = interp_codecs.lookup_codec(space, - space.str_w(self.w_encoding)) self.w_encoder = space.call_method(w_codec, "incrementalencoder", w_errors) diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -367,15 +367,8 @@ def test_illegal_decoder(self): import _io - t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', - encoding='quopri_codec') - raises(TypeError, t.read, 1) - t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', - encoding='quopri_codec') - raises(TypeError, t.readline) - t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', - encoding='quopri_codec') - raises(TypeError, t.read) + raises(LookupError, _io.TextIOWrapper, _io.BytesIO(), + encoding='quopri_codec') def test_read_nonbytes(self): import _io From pypy.commits at gmail.com Tue Jan 26 18:19:58 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 15:19:58 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: Found the segfault's origin, made more explicit that way (not fixed yet) Message-ID: <56a7ff1e.a5c9c20a.42161.653d@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81961:d9a31cd29535 Date: 2016-01-27 00:19 +0100 http://bitbucket.org/pypy/pypy/changeset/d9a31cd29535/ Log: Found the segfault's origin, made more explicit that way (not fixed yet) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -439,6 +439,9 @@ pto.c_tp_as_mapping = heaptype.c_as_mapping pto.c_tp_as_buffer = heaptype.c_as_buffer + pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out + pto.c_tp_itemsize = 0 + return rffi.cast(PyObject, heaptype) def type_attach(space, py_obj, w_type): @@ -473,8 +476,6 @@ pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: pto.c_tp_name = rffi.str2charp(w_type.name) - pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out - pto.c_tp_itemsize = 0 # uninitialized fields: # c_tp_print, c_tp_getattr, c_tp_setattr # XXX implement From pypy.commits at gmail.com Tue Jan 26 18:27:41 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 15:27:41 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: probably fixed by moving this part of the code later (this is part of an Message-ID: <56a800ed.88c8c20a.365f1.6515@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81962:834fd655953d Date: 2016-01-27 00:26 +0100 http://bitbucket.org/pypy/pypy/changeset/834fd655953d/ Log: probably fixed by moving this part of the code later (this is part of an attempt at simplifying the bootstrapping logic in init_typeobject, will see how far this theory goes or if I need to really reproduce it again) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -468,20 +468,13 @@ PyObject_Del.api_func.get_wrapper(space)) pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype, PyType_GenericAlloc.api_func.get_wrapper(space)) - if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: - w_typename = space.getattr(w_type, space.wrap('__name__')) - heaptype = rffi.cast(PyHeapTypeObject, pto) - heaptype.c_ht_name = get_pyobj_and_incref(space, w_typename) - from pypy.module.cpyext.stringobject import PyString_AsString - pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) - else: - pto.c_tp_name = rffi.str2charp(w_type.name) # uninitialized fields: # c_tp_print, c_tp_getattr, c_tp_setattr # XXX implement # c_tp_compare and the following fields (see http://docs.python.org/c-api/typeobj.html ) w_base = best_base(space, w_type.bases_w) - pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base)) + py_base = get_pyobj_and_incref(space, w_base) + pto.c_tp_base = rffi.cast(PyTypeObjectPtr, py_base) finish_type_1(space, pto) finish_type_2(space, pto, w_type) @@ -496,6 +489,16 @@ if space.is_w(w_type, space.w_object): pto.c_tp_new = rffi.cast(newfunc, 1) update_all_slots(space, w_type, pto) + + if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: + w_typename = space.getattr(w_type, space.wrap('__name__')) + heaptype = rffi.cast(PyHeapTypeObject, pto) + heaptype.c_ht_name = get_pyobj_and_incref(space, w_typename) + from pypy.module.cpyext.stringobject import PyString_AsString + pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) + else: + pto.c_tp_name = rffi.str2charp(w_type.name) + pto.c_tp_flags |= Py_TPFLAGS_READY return pto @@ -562,8 +565,7 @@ if not py_type.c_tp_base: # borrowed reference, but w_object is unlikely to disappear - base = make_ref(space, space.w_object) - Py_DecRef(space, base) + base = as_pyobj(space, space.w_object) py_type.c_tp_base = rffi.cast(PyTypeObjectPtr, base) finish_type_1(space, py_type) @@ -597,7 +599,7 @@ bases = space.newtuple([]) else: bases = space.newtuple([from_ref(space, base_pyo)]) - pto.c_tp_bases = make_ref(space, bases) + pto.c_tp_bases = get_pyobj_and_incref(space, bases) def finish_type_2(space, pto, w_obj): """ From pypy.commits at gmail.com Tue Jan 26 18:36:51 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 15:36:51 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: Rename: this new function should really be a superset of make_ref() Message-ID: <56a80313.41dfc20a.e49d.6721@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81963:af2938e59cd3 Date: 2016-01-27 00:29 +0100 http://bitbucket.org/pypy/pypy/changeset/af2938e59cd3/ Log: Rename: this new function should really be a superset of make_ref() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -675,7 +675,7 @@ if callable.api_func.result_borrowed: retval = as_pyobj(space, result) else: - retval = get_pyobj_and_incref(space, result) + retval = make_ref(space, result) retval = rffi.cast(callable.api_func.restype, retval) else: retval = lltype.nullptr(PyObject.TO) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -33,7 +33,7 @@ # similar to PyType_GenericAlloc? # except that it's not related to any pypy object. - pytype = get_pyobj_and_incref(space, w_type) + pytype = make_ref(space, w_type) pytype = rffi.cast(PyTypeObjectPtr, pytype) assert pytype # Don't increase refcount for non-heaptypes @@ -241,7 +241,7 @@ This doesn't give a new reference, but the returned 'PyObject *' is valid at least as long as 'w_obj' is. **To be safe, you should use keepalive_until_here(w_obj) some time later.** In case of - doubt, use the safer get_pyobj_and_incref(). + doubt, use the safer make_ref(). """ if w_obj is not None: assert not is_pyobj(w_obj) @@ -293,7 +293,7 @@ return hop.inputconst(lltype.Bool, hop.s_result.const) @specialize.ll() -def get_pyobj_and_incref(space, obj): +def make_ref(space, obj): """Increment the reference counter of the PyObject and return it. Can be called with either a PyObject or a W_Root. """ @@ -309,7 +309,7 @@ return pyobj else: return lltype.nullptr(PyObject.TO) -INTERPLEVEL_API['get_pyobj_and_incref'] = get_pyobj_and_incref +INTERPLEVEL_API['make_ref'] = make_ref @specialize.ll() @@ -335,7 +335,7 @@ @specialize.ll() def incref(space, obj): - get_pyobj_and_incref(space, obj) + make_ref(space, obj) INTERPLEVEL_API['incref'] = incref @specialize.ll() diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -21,8 +21,7 @@ from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, - track_reference, RefcountState, borrow_from, Py_DecRef, - get_pyobj_and_incref) + track_reference, RefcountState, borrow_from, Py_DecRef) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State @@ -418,7 +417,7 @@ def type_alloc(space, w_metatype): - metatype = get_pyobj_and_incref(space, w_metatype) + metatype = make_ref(space, w_metatype) metatype = rffi.cast(PyTypeObjectPtr, metatype) assert metatype # Don't increase refcount for non-heaptypes @@ -473,7 +472,7 @@ # XXX implement # c_tp_compare and the following fields (see http://docs.python.org/c-api/typeobj.html ) w_base = best_base(space, w_type.bases_w) - py_base = get_pyobj_and_incref(space, w_base) + py_base = make_ref(space, w_base) pto.c_tp_base = rffi.cast(PyTypeObjectPtr, py_base) finish_type_1(space, pto) @@ -493,7 +492,7 @@ if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: w_typename = space.getattr(w_type, space.wrap('__name__')) heaptype = rffi.cast(PyHeapTypeObject, pto) - heaptype.c_ht_name = get_pyobj_and_incref(space, w_typename) + heaptype.c_ht_name = make_ref(space, w_typename) from pypy.module.cpyext.stringobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: @@ -599,7 +598,7 @@ bases = space.newtuple([]) else: bases = space.newtuple([from_ref(space, base_pyo)]) - pto.c_tp_bases = get_pyobj_and_incref(space, bases) + pto.c_tp_bases = make_ref(space, bases) def finish_type_2(space, pto, w_obj): """ From pypy.commits at gmail.com Tue Jan 26 18:36:53 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 15:36:53 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: rename from_pyobj() back to from_ref() too Message-ID: <56a80315.520e1c0a.545fd.ffffd6f5@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81964:769d8cdc1853 Date: 2016-01-27 00:34 +0100 http://bitbucket.org/pypy/pypy/changeset/769d8cdc1853/ Log: rename from_pyobj() back to from_ref() too diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -308,7 +308,7 @@ elif is_PyObject(ARG) and is_wrapped: # build a W_Root, possibly from a 'PyObject *' if is_pyobj(input_arg): - arg = from_pyobj(space, input_arg) + arg = from_ref(space, input_arg) else: arg = input_arg @@ -634,7 +634,7 @@ arg = args[i] if is_PyObject(typ) and is_wrapped: assert is_pyobj(arg) - arg_conv = from_pyobj(space, arg) + arg_conv = from_ref(space, arg) else: arg_conv = arg boxed_args += (arg_conv, ) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -209,17 +209,12 @@ Finds the interpreter object corresponding to the given reference. If the object is not yet realized (see stringobject.py), creates it. """ - GOES_AWAY - assert lltype.typeOf(ref) == PyObject + assert is_pyobj(ref) if not ref: return None - state = space.fromcache(RefcountState) - ptr = rffi.cast(ADDR, ref) - - try: - return state.py_objects_r2w[ptr] - except KeyError: - pass + w_obj = rawrefcount.to_obj(W_Root, pyobj) + if w_obj is not None: + return w_obj # This reference is not yet a real interpreter object. # Realize it. @@ -259,20 +254,6 @@ return rawrefcount.to_obj(W_Root, pyobj) is not None INTERPLEVEL_API['pyobj_has_w_obj'] = staticmethod(pyobj_has_w_obj) - at specialize.ll() -def from_pyobj(space, pyobj): - assert is_pyobj(pyobj) - if pyobj: - pyobj = rffi.cast(PyObject, pyobj) - w_obj = rawrefcount.to_obj(W_Root, pyobj) - if w_obj is None: - XXXXXXXXXXX - return w_obj - else: - return None -from_pyobj._always_inline_ = 'try' -INTERPLEVEL_API['from_pyobj'] = from_pyobj - def is_pyobj(x): if x is None or isinstance(x, W_Root): @@ -321,7 +302,7 @@ """ if is_pyobj(obj): pyobj = rffi.cast(PyObject, obj) - w_obj = from_pyobj(space, pyobj) + w_obj = from_ref(space, pyobj) else: w_obj = obj pyobj = as_pyobj(space, w_obj) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -21,7 +21,7 @@ from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, - track_reference, RefcountState, borrow_from, Py_DecRef) + track_reference, RefcountState, borrow_from, Py_DecRef, as_pyobj) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State From pypy.commits at gmail.com Tue Jan 26 19:19:56 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 16:19:56 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: Hopefully fix the bootstrap cycles in this way Message-ID: <56a80d2c.520e1c0a.545fd.ffffe019@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81965:ae316294940c Date: 2016-01-27 01:04 +0100 http://bitbucket.org/pypy/pypy/changeset/ae316294940c/ Log: Hopefully fix the bootstrap cycles in this way diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -310,13 +310,24 @@ realize=type_realize, dealloc=type_dealloc) - # we create the type "type" manually here, because of the cycle - # through its 'c_ob_type' field + # There is the obvious cycle of 'type(type) == type', but there are + # also several other ones, like 'tuple.tp_bases' being itself a + # tuple instance. We solve the first one by creating the type + # "type" manually here. For the other cycles, we fix them by delaying + # creation of the types here, and hoping nothing breaks by seeing + # uninitialized-yet types (only for a few basic types like 'type', + # 'tuple', 'object', 'str'). + space._cpyext_delay_type_creation = [] + py_type = _type_alloc(space, lltype.nullptr(PyTypeObject)) py_type.c_ob_type = rffi.cast(PyTypeObjectPtr, py_type) track_reference(space, py_type, space.w_type) type_attach(space, py_type, space.w_type) + while space._cpyext_delay_type_creation: + _type_really_attach(space, *space._cpyext_delay_type_creation.pop()) + del space._cpyext_delay_type_creation + @cpython_api([PyObject], lltype.Void, external=False) def subtype_dealloc(space, obj): @@ -447,6 +458,13 @@ """ Fills a newly allocated PyTypeObject from an existing type. """ + if hasattr(space, '_cpyext_delay_type_creation'): + space._cpyext_delay_type_creation.append((py_obj, w_type)) + else: + _type_really_attach(space, py_obj, w_type) + return rffi.cast(PyTypeObjectPtr, py_obj) + +def _type_really_attach(space, py_obj, w_type): from pypy.module.cpyext.object import PyObject_Del assert isinstance(w_type, W_TypeObject) @@ -499,7 +517,6 @@ pto.c_tp_name = rffi.str2charp(w_type.name) pto.c_tp_flags |= Py_TPFLAGS_READY - return pto def py_type_ready(space, pto): if pto.c_tp_flags & Py_TPFLAGS_READY: @@ -513,6 +530,7 @@ def type_realize(space, py_obj): pto = rffi.cast(PyTypeObjectPtr, py_obj) + assert pto.c_tp_flags & Py_TPFLAGS_READY == 0 assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 pto.c_tp_flags |= Py_TPFLAGS_READYING try: @@ -590,7 +608,8 @@ base = pto.c_tp_base base_pyo = rffi.cast(PyObject, pto.c_tp_base) if base and not base.c_tp_flags & Py_TPFLAGS_READY: - type_realize(space, rffi.cast(PyObject, base_pyo)) + if not hasattr(space, '_cpyext_delay_type_creation'): + type_realize(space, rffi.cast(PyObject, base_pyo)) if base and not pto.c_ob_type: # will be filled later pto.c_ob_type = base.c_ob_type if not pto.c_tp_bases: diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -164,6 +164,10 @@ else: w_self.terminator = NoDictTerminator(space, w_self) + def __repr__(self): + "NOT_RPYTHON" + return '' % (self.name, id(self)) + def mutated(w_self, key): """ The type is being mutated. key is either the string containing the From pypy.commits at gmail.com Tue Jan 26 19:19:58 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 16:19:58 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: More tweaks for bootstrapping Message-ID: <56a80d2e.0f811c0a.1bc63.1ade@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81966:dfc37d8a80f8 Date: 2016-01-27 01:17 +0100 http://bitbucket.org/pypy/pypy/changeset/dfc37d8a80f8/ Log: More tweaks for bootstrapping diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -5,7 +5,7 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from pypy.module.cpyext.api import ( cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, - CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, + CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, is_PyObject, INTERPLEVEL_API) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject @@ -212,7 +212,7 @@ assert is_pyobj(ref) if not ref: return None - w_obj = rawrefcount.to_obj(W_Root, pyobj) + w_obj = rawrefcount.to_obj(W_Root, ref) if w_obj is not None: return w_obj diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -324,9 +324,21 @@ track_reference(space, py_type, space.w_type) type_attach(space, py_type, space.w_type) + as_pyobj(space, space.w_str) + as_pyobj(space, space.w_tuple) + as_pyobj(space, space.w_object) + + delayed_types = [] while space._cpyext_delay_type_creation: - _type_really_attach(space, *space._cpyext_delay_type_creation.pop()) + (py_obj, w_type) = space._cpyext_delay_type_creation.pop() + _type_really_attach(space, py_obj, w_type) + delayed_types.append((py_obj, w_type)) del space._cpyext_delay_type_creation + for py_obj, w_type in delayed_types: + pto = rffi.cast(PyTypeObjectPtr, py_obj) + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) + finish_type_3(space, pto, w_type) @cpython_api([PyObject], lltype.Void, external=False) @@ -493,8 +505,9 @@ py_base = make_ref(space, w_base) pto.c_tp_base = rffi.cast(PyTypeObjectPtr, py_base) - finish_type_1(space, pto) - finish_type_2(space, pto, w_type) + if not hasattr(space, '_cpyext_delay_type_creation'): + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct) if pto.c_tp_base: @@ -507,6 +520,12 @@ pto.c_tp_new = rffi.cast(newfunc, 1) update_all_slots(space, w_type, pto) + if not hasattr(space, '_cpyext_delay_type_creation'): + finish_type_3(space, pto, w_type) + + pto.c_tp_flags |= Py_TPFLAGS_READY + +def finish_type_3(space, pto, w_type): if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: w_typename = space.getattr(w_type, space.wrap('__name__')) heaptype = rffi.cast(PyHeapTypeObject, pto) @@ -516,8 +535,6 @@ else: pto.c_tp_name = rffi.str2charp(w_type.name) - pto.c_tp_flags |= Py_TPFLAGS_READY - def py_type_ready(space, pto): if pto.c_tp_flags & Py_TPFLAGS_READY: return @@ -608,8 +625,7 @@ base = pto.c_tp_base base_pyo = rffi.cast(PyObject, pto.c_tp_base) if base and not base.c_tp_flags & Py_TPFLAGS_READY: - if not hasattr(space, '_cpyext_delay_type_creation'): - type_realize(space, rffi.cast(PyObject, base_pyo)) + type_realize(space, rffi.cast(PyObject, base_pyo)) if base and not pto.c_ob_type: # will be filled later pto.c_ob_type = base.c_ob_type if not pto.c_tp_bases: From pypy.commits at gmail.com Tue Jan 26 19:20:00 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 26 Jan 2016 16:20:00 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: fix imports Message-ID: <56a80d30.a453c20a.d2467.6ef7@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81967:fcc70c53434c Date: 2016-01-27 01:19 +0100 http://bitbucket.org/pypy/pypy/changeset/fcc70c53434c/ Log: fix imports diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -290,9 +290,8 @@ @specialize.ll() def unwrapper(space, *args): - from pypy.module.cpyext.pyobject import Py_DecRef + from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj from pypy.module.cpyext.pyobject import make_ref, from_ref - from pypy.module.cpyext.pyobject import Reference newargs = () keepalives = () assert len(args) == len(api_function.argtypes) From pypy.commits at gmail.com Wed Jan 27 07:45:29 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 04:45:29 -0800 (PST) Subject: [pypy-commit] pypy cpyext-bootstrap: A branch to simplify the bootstrapping, to reduce the amount of changes Message-ID: <56a8bbe9.46bb1c0a.8ae80.ffffed8d@mx.google.com> Author: Armin Rigo Branch: cpyext-bootstrap Changeset: r81968:30bbc81f0b06 Date: 2016-01-27 13:42 +0100 http://bitbucket.org/pypy/pypy/changeset/30bbc81f0b06/ Log: A branch to simplify the bootstrapping, to reduce the amount of changes in cpyext-gc-support-2 From pypy.commits at gmail.com Wed Jan 27 07:45:31 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 04:45:31 -0800 (PST) Subject: [pypy-commit] pypy cpyext-bootstrap: Kill kill kill the logic in init_typeobject(). It was anyway very Message-ID: <56a8bbeb.cf821c0a.32477.ffffb9f5@mx.google.com> Author: Armin Rigo Branch: cpyext-bootstrap Changeset: r81969:3cd5ace97b49 Date: 2016-01-27 13:43 +0100 http://bitbucket.org/pypy/pypy/changeset/3cd5ace97b49/ Log: Kill kill kill the logic in init_typeobject(). It was anyway very strange, because it would create "heap" type objects, initialize them carefully, and then forget them happily by calling track_reference() again (which would crash if DEBUG_REFCOUNT=True) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -828,6 +828,9 @@ space.fromcache(State).install_dll(eci) # populate static data + from pypy.module.cpyext.pyobject import track_reference, get_typedescr + from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2 + to_attach = [] for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext w_obj = eval(expr) @@ -861,18 +864,25 @@ # we have a structure, get its address in_dll = ll2ctypes.get_ctypes_type(PyObject.TO).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, ctypes.pointer(in_dll)) - from pypy.module.cpyext.pyobject import ( - track_reference, get_typedescr) - w_type = space.type(w_obj) - typedescr = get_typedescr(w_type.instancetypedef) py_obj.c_ob_refcnt = 1 - py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, - make_ref(space, w_type)) - typedescr.attach(space, py_obj, w_obj) track_reference(space, py_obj, w_obj) + to_attach.append((py_obj, w_obj)) else: assert False, "Unknown static object: %s %s" % (typ, name) + space._cpyext_type_init = [] + for py_obj, w_obj in to_attach: + w_type = space.type(w_obj) + typedescr = get_typedescr(w_type.instancetypedef) + py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, + make_ref(space, w_type)) + typedescr.attach(space, py_obj, w_obj) + cpyext_type_init = space._cpyext_type_init + del space._cpyext_type_init + for pto, w_type in cpyext_type_init: + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) + pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement structure initialization code diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -310,55 +310,6 @@ realize=type_realize, dealloc=type_dealloc) - # some types are difficult to create because of cycles. - # - object.ob_type = type - # - type.ob_type = type - # - tuple.ob_type = type - # - type.tp_base = object - # - tuple.tp_base = object - # - type.tp_bases is a tuple - # - object.tp_bases is a tuple - # - tuple.tp_bases is a tuple - - # insert null placeholders to please create_ref() - track_reference(space, lltype.nullptr(PyObject.TO), space.w_type) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_object) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_tuple) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_str) - - # create the objects - py_type = create_ref(space, space.w_type) - py_object = create_ref(space, space.w_object) - py_tuple = create_ref(space, space.w_tuple) - py_str = create_ref(space, space.w_str) - # XXX py_str is not initialized here correctly, because we are - # not tracking it, it gets an empty c_ob_type from py_basestring - - # form cycles - pto_type = rffi.cast(PyTypeObjectPtr, py_type) - py_type.c_ob_type = pto_type - py_object.c_ob_type = pto_type - py_tuple.c_ob_type = pto_type - - pto_object = rffi.cast(PyTypeObjectPtr, py_object) - pto_type.c_tp_base = pto_object - pto_tuple = rffi.cast(PyTypeObjectPtr, py_tuple) - pto_tuple.c_tp_base = pto_object - - pto_type.c_tp_bases.c_ob_type = pto_tuple - pto_object.c_tp_bases.c_ob_type = pto_tuple - pto_tuple.c_tp_bases.c_ob_type = pto_tuple - - for typ in (py_type, py_object, py_tuple, py_str): - heaptype = rffi.cast(PyHeapTypeObject, typ) - heaptype.c_ht_name.c_ob_type = pto_type - - # Restore the mapping - track_reference(space, py_type, space.w_type, replace=True) - track_reference(space, py_object, space.w_object, replace=True) - track_reference(space, py_tuple, space.w_tuple, replace=True) - track_reference(space, py_str, space.w_str, replace=True) - @cpython_api([PyObject], lltype.Void, external=False) def subtype_dealloc(space, obj): @@ -520,8 +471,11 @@ w_base = best_base(space, w_type.bases_w) pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base)) - finish_type_1(space, pto) - finish_type_2(space, pto, w_type) + if hasattr(space, '_cpyext_type_init'): + space._cpyext_type_init.append((pto, w_type)) + else: + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct) if pto.c_tp_base: From pypy.commits at gmail.com Wed Jan 27 08:27:45 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 05:27:45 -0800 (PST) Subject: [pypy-commit] pypy default: Add "-m ensurepip" Message-ID: <56a8c5d1.c615c20a.6aa2b.5547@mx.google.com> Author: Armin Rigo Branch: Changeset: r81970:4974d1173ac2 Date: 2016-01-27 14:26 +0100 http://bitbucket.org/pypy/pypy/changeset/4974d1173ac2/ Log: Add "-m ensurepip" diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -54,7 +54,8 @@ It is quite common nowadays that xyz is available on PyPI_ and installable with ``pip install xyz``. The simplest solution is to `use virtualenv (as documented here)`_. Then enter (activate) the virtualenv -and type: ``pip install xyz``. +and type: ``pip install xyz``. If you don't know or don't want virtualenv, +you can also install ``pip`` globally by saying ``pypy -m ensurepip``. If you get errors from the C compiler, the module is a CPython C Extension module using unsupported features. `See below.`_ From pypy.commits at gmail.com Wed Jan 27 08:51:04 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 05:51:04 -0800 (PST) Subject: [pypy-commit] pypy cpyext-bootstrap: pfff, took me a while to figure out that memory corruption was going on here: Message-ID: <56a8cb48.4f911c0a.b4082.ffffcfbb@mx.google.com> Author: Armin Rigo Branch: cpyext-bootstrap Changeset: r81971:ed11e491b6c2 Date: 2016-01-27 14:50 +0100 http://bitbucket.org/pypy/pypy/changeset/ed11e491b6c2/ Log: pfff, took me a while to figure out that memory corruption was going on here: the globals False/True would be initialized as longs by writing 0/1 in the third word of memory, but they were only 2-words PyObjects... diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -442,8 +442,8 @@ TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyObject*', 'space.w_False'), + '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'), + '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'), '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), @@ -855,7 +855,7 @@ assert False, "Unknown static pointer: %s %s" % (typ, name) ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value - elif typ in ('PyObject*', 'PyTypeObject*'): + elif typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) From pypy.commits at gmail.com Wed Jan 27 10:37:04 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 07:37:04 -0800 (PST) Subject: [pypy-commit] pypy cpyext-bootstrap: This test checks a condition which is actually not true, but which Message-ID: <56a8e420.d02d1c0a.b9ed.fffffdef@mx.google.com> Author: Armin Rigo Branch: cpyext-bootstrap Changeset: r81972:dd3a1abe4374 Date: 2016-01-27 16:35 +0100 http://bitbucket.org/pypy/pypy/changeset/dd3a1abe4374/ Log: This test checks a condition which is actually not true, but which happened to be true due to the two versions of some PyTypeObjects which this branch removes. The condition is false e.g. after translation anyway. diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -374,6 +374,11 @@ module = self.import_extension('foo', [ ("test_type", "METH_O", ''' + /* "args->ob_type" is a strange way to get at 'type', + which should have a different tp_getattro/tp_setattro + than its tp_base, which is 'object'. + */ + if (!args->ob_type->tp_setattro) { PyErr_SetString(PyExc_ValueError, "missing tp_setattro"); @@ -382,8 +387,12 @@ if (args->ob_type->tp_setattro == args->ob_type->tp_base->tp_setattro) { - PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); - return NULL; + /* Note that unlike CPython, in PyPy 'type.tp_setattro' + is the same function as 'object.tp_setattro'. This + test used to check that it was not, but that was an + artifact of the bootstrap logic only---in the final + C sources I checked and they are indeed the same. + So we ignore this problem here. */ } if (!args->ob_type->tp_getattro) { From pypy.commits at gmail.com Wed Jan 27 11:40:37 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 08:40:37 -0800 (PST) Subject: [pypy-commit] pypy cpyext-bootstrap: Attempt to fix translation, getting segfault while translating now Message-ID: <56a8f305.863f1c0a.33db4.4b60@mx.google.com> Author: Armin Rigo Branch: cpyext-bootstrap Changeset: r81973:036fa7ebc766 Date: 2016-01-27 17:03 +0100 http://bitbucket.org/pypy/pypy/changeset/036fa7ebc766/ Log: Attempt to fix translation, getting segfault while translating now diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -828,9 +828,7 @@ space.fromcache(State).install_dll(eci) # populate static data - from pypy.module.cpyext.pyobject import track_reference, get_typedescr - from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2 - to_attach = [] + builder = StaticObjectBuilder(space) for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext w_obj = eval(expr) @@ -864,24 +862,10 @@ # we have a structure, get its address in_dll = ll2ctypes.get_ctypes_type(PyObject.TO).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, ctypes.pointer(in_dll)) - py_obj.c_ob_refcnt = 1 - track_reference(space, py_obj, w_obj) - to_attach.append((py_obj, w_obj)) + builder.prepare(py_obj, w_obj) else: assert False, "Unknown static object: %s %s" % (typ, name) - - space._cpyext_type_init = [] - for py_obj, w_obj in to_attach: - w_type = space.type(w_obj) - typedescr = get_typedescr(w_type.instancetypedef) - py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, - make_ref(space, w_type)) - typedescr.attach(space, py_obj, w_obj) - cpyext_type_init = space._cpyext_type_init - del space._cpyext_type_init - for pto, w_type in cpyext_type_init: - finish_type_1(space, pto) - finish_type_2(space, pto, w_type) + builder.attach_all() pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') @@ -898,6 +882,36 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') + +class StaticObjectBuilder: + def __init__(self, space): + self.space = space + self.to_attach = [] + + def prepare(self, py_obj, w_obj): + from pypy.module.cpyext.pyobject import track_reference + py_obj.c_ob_refcnt = 1 + track_reference(self.space, py_obj, w_obj) + self.to_attach.append((py_obj, w_obj)) + + def attach_all(self): + from pypy.module.cpyext.pyobject import get_typedescr, make_ref + from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2 + space = self.space + space._cpyext_type_init = [] + for py_obj, w_obj in self.to_attach: + w_type = space.type(w_obj) + typedescr = get_typedescr(w_type.instancetypedef) + py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, + make_ref(space, w_type)) + typedescr.attach(space, py_obj, w_obj) + cpyext_type_init = space._cpyext_type_init + del space._cpyext_type_init + for pto, w_type in cpyext_type_init: + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) + + def mangle_name(prefix, name): if name.startswith('Py'): return prefix + name[2:] @@ -1074,7 +1088,7 @@ def setup_library(space): "NOT_RPYTHON" - from pypy.module.cpyext.pyobject import make_ref + from pypy.module.cpyext.pyobject import get_typedescr, make_ref export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) from rpython.translator.c.database import LowLevelDatabase @@ -1092,14 +1106,36 @@ run_bootstrap_functions(space) setup_va_functions(eci) + from pypy.module import cpyext # for eval() below + + # Set up the types. This version of the code really allocates + # them: this is different from build_bridge(), where they are set + # up at the static address from the bridge library. This needs + # special logic to solve the cycles issue; otherwise, we could + # simply leave everything to make_ref() in the "populate static + # data" loop below. + builder = StaticObjectBuilder(space) + for name, (typ, expr) in GLOBALS.iteritems(): + if typ == 'PyTypeObject*': + w_type = eval(expr) + w_typetype = space.type(w_type) + if not space.is_w(w_typetype, space.w_type): + continue # skip types with a custom metaclass + typedescr = get_typedescr(w_typetype.instancetypedef) + py_obj = typedescr.allocate(space, None) + builder.prepare(py_obj, w_type) + py_typetype = rffi.cast(PyTypeObjectPtr, make_ref(space, space.w_type)) + for py_obj, w_type in builder.to_attach: + py_obj.c_ob_type = py_typetype + builder.attach_all() + # populate static data for name, (typ, expr) in GLOBALS.iteritems(): name = name.replace("#", "") if name.startswith('PyExc_'): name = '_' + name - from pypy.module import cpyext w_obj = eval(expr) - if typ in ('PyObject*', 'PyTypeObject*'): + if typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): struct_ptr = make_ref(space, w_obj) elif typ == 'PyDateTime_CAPI*': continue From pypy.commits at gmail.com Wed Jan 27 11:40:39 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 08:40:39 -0800 (PST) Subject: [pypy-commit] pypy cpyext-bootstrap: Some translation fixes Message-ID: <56a8f307.e906c20a.89ca3.ffffa289@mx.google.com> Author: Armin Rigo Branch: cpyext-bootstrap Changeset: r81974:a11965258086 Date: 2016-01-27 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/a11965258086/ Log: Some translation fixes diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -506,7 +506,9 @@ def get_structtype_for_ctype(ctype): from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr from pypy.module.cpyext.cdatetime import PyDateTime_CAPI + from pypy.module.cpyext.intobject import PyIntObject return {"PyObject*": PyObject, "PyTypeObject*": PyTypeObjectPtr, + "PyIntObject*": PyIntObject, "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype] PyTypeObject = lltype.ForwardReference() @@ -1088,7 +1090,7 @@ def setup_library(space): "NOT_RPYTHON" - from pypy.module.cpyext.pyobject import get_typedescr, make_ref + from pypy.module.cpyext.pyobject import make_ref export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) from rpython.translator.c.database import LowLevelDatabase @@ -1108,25 +1110,22 @@ from pypy.module import cpyext # for eval() below - # Set up the types. This version of the code really allocates - # them: this is different from build_bridge(), where they are set - # up at the static address from the bridge library. This needs - # special logic to solve the cycles issue; otherwise, we could - # simply leave everything to make_ref() in the "populate static - # data" loop below. + # Set up the types. Needs a special case, because of the + # immediate cycle involving 'c_ob_type', and because we don't + # want these types to be Py_TPFLAGS_HEAPTYPE. + static_types = {} + for name, (typ, expr) in GLOBALS.items(): + if typ == 'PyTypeObject*': + pto = lltype.malloc(PyTypeObject, immortal=True, + zero=True, flavor='raw') + pto.c_ob_refcnt = 1 + pto.c_tp_basicsize = -1 + static_types[name] = pto builder = StaticObjectBuilder(space) - for name, (typ, expr) in GLOBALS.iteritems(): - if typ == 'PyTypeObject*': - w_type = eval(expr) - w_typetype = space.type(w_type) - if not space.is_w(w_typetype, space.w_type): - continue # skip types with a custom metaclass - typedescr = get_typedescr(w_typetype.instancetypedef) - py_obj = typedescr.allocate(space, None) - builder.prepare(py_obj, w_type) - py_typetype = rffi.cast(PyTypeObjectPtr, make_ref(space, space.w_type)) - for py_obj, w_type in builder.to_attach: - py_obj.c_ob_type = py_typetype + for name, pto in static_types.items(): + pto.c_ob_type = static_types['PyType_Type#'] + w_type = eval(GLOBALS[name][1]) + builder.prepare(rffi.cast(PyObject, pto), w_type) builder.attach_all() # populate static data diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -146,7 +146,7 @@ assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) if not struct: - assert not space.config.translating + #assert not space.config.translating assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE if slot_names[0] == 'c_tp_as_number': STRUCT_TYPE = PyNumberMethods @@ -427,6 +427,8 @@ pto.c_tp_as_sequence = heaptype.c_as_sequence pto.c_tp_as_mapping = heaptype.c_as_mapping pto.c_tp_as_buffer = heaptype.c_as_buffer + pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out + pto.c_tp_itemsize = 0 return rffi.cast(PyObject, heaptype) @@ -462,8 +464,6 @@ pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: pto.c_tp_name = rffi.str2charp(w_type.name) - pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out - pto.c_tp_itemsize = 0 # uninitialized fields: # c_tp_print, c_tp_getattr, c_tp_setattr # XXX implement From pypy.commits at gmail.com Wed Jan 27 12:06:24 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 09:06:24 -0800 (PST) Subject: [pypy-commit] pypy default: cpyext implies micronumpy, otherwise we get never-initialized ndarray subclasses Message-ID: <56a8f910.e218c20a.e673a.ffffb0d4@mx.google.com> Author: Armin Rigo Branch: Changeset: r81975:ec6457a2d845 Date: 2016-01-27 18:05 +0100 http://bitbucket.org/pypy/pypy/changeset/ec6457a2d845/ Log: cpyext implies micronumpy, otherwise we get never-initialized ndarray subclasses diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -85,7 +85,8 @@ module_dependencies = { '_multiprocessing': [('objspace.usemodules.time', True), ('objspace.usemodules.thread', True)], - 'cpyext': [('objspace.usemodules.array', True)], + 'cpyext': [('objspace.usemodules.array', True), + ('objspace.usemodules.micronumpy', True)], 'cppyy': [('objspace.usemodules.cpyext', True)], } module_suggests = { From pypy.commits at gmail.com Wed Jan 27 13:03:03 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 10:03:03 -0800 (PST) Subject: [pypy-commit] pypy default: hg merge cpyext-bootstrap Message-ID: <56a90657.82e11c0a.12767.7089@mx.google.com> Author: Armin Rigo Branch: Changeset: r81977:7d1a37bf041c Date: 2016-01-27 19:02 +0100 http://bitbucket.org/pypy/pypy/changeset/7d1a37bf041c/ Log: hg merge cpyext-bootstrap simplify the bootstrap procedure, which was a bit manual and slightly buggy (e.g. untranslated, it made two versions of the same PyTypeObjects; and translated, the type objects ends up with Py_TPFLAGS_HEAPTYPE, even though they should not be). also fixes a potential memory corruption for initializing boolean objects. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -442,8 +442,8 @@ TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyObject*', 'space.w_False'), + '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'), + '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'), '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), @@ -506,7 +506,9 @@ def get_structtype_for_ctype(ctype): from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr from pypy.module.cpyext.cdatetime import PyDateTime_CAPI + from pypy.module.cpyext.intobject import PyIntObject return {"PyObject*": PyObject, "PyTypeObject*": PyTypeObjectPtr, + "PyIntObject*": PyIntObject, "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype] PyTypeObject = lltype.ForwardReference() @@ -828,6 +830,7 @@ space.fromcache(State).install_dll(eci) # populate static data + builder = StaticObjectBuilder(space) for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext w_obj = eval(expr) @@ -852,7 +855,7 @@ assert False, "Unknown static pointer: %s %s" % (typ, name) ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value - elif typ in ('PyObject*', 'PyTypeObject*'): + elif typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) @@ -861,17 +864,10 @@ # we have a structure, get its address in_dll = ll2ctypes.get_ctypes_type(PyObject.TO).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, ctypes.pointer(in_dll)) - from pypy.module.cpyext.pyobject import ( - track_reference, get_typedescr) - w_type = space.type(w_obj) - typedescr = get_typedescr(w_type.instancetypedef) - py_obj.c_ob_refcnt = 1 - py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, - make_ref(space, w_type)) - typedescr.attach(space, py_obj, w_obj) - track_reference(space, py_obj, w_obj) + builder.prepare(py_obj, w_obj) else: assert False, "Unknown static object: %s %s" % (typ, name) + builder.attach_all() pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') @@ -888,6 +884,36 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') + +class StaticObjectBuilder: + def __init__(self, space): + self.space = space + self.to_attach = [] + + def prepare(self, py_obj, w_obj): + from pypy.module.cpyext.pyobject import track_reference + py_obj.c_ob_refcnt = 1 + track_reference(self.space, py_obj, w_obj) + self.to_attach.append((py_obj, w_obj)) + + def attach_all(self): + from pypy.module.cpyext.pyobject import get_typedescr, make_ref + from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2 + space = self.space + space._cpyext_type_init = [] + for py_obj, w_obj in self.to_attach: + w_type = space.type(w_obj) + typedescr = get_typedescr(w_type.instancetypedef) + py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, + make_ref(space, w_type)) + typedescr.attach(space, py_obj, w_obj) + cpyext_type_init = space._cpyext_type_init + del space._cpyext_type_init + for pto, w_type in cpyext_type_init: + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) + + def mangle_name(prefix, name): if name.startswith('Py'): return prefix + name[2:] @@ -1082,14 +1108,33 @@ run_bootstrap_functions(space) setup_va_functions(eci) + from pypy.module import cpyext # for eval() below + + # Set up the types. Needs a special case, because of the + # immediate cycle involving 'c_ob_type', and because we don't + # want these types to be Py_TPFLAGS_HEAPTYPE. + static_types = {} + for name, (typ, expr) in GLOBALS.items(): + if typ == 'PyTypeObject*': + pto = lltype.malloc(PyTypeObject, immortal=True, + zero=True, flavor='raw') + pto.c_ob_refcnt = 1 + pto.c_tp_basicsize = -1 + static_types[name] = pto + builder = StaticObjectBuilder(space) + for name, pto in static_types.items(): + pto.c_ob_type = static_types['PyType_Type#'] + w_type = eval(GLOBALS[name][1]) + builder.prepare(rffi.cast(PyObject, pto), w_type) + builder.attach_all() + # populate static data for name, (typ, expr) in GLOBALS.iteritems(): name = name.replace("#", "") if name.startswith('PyExc_'): name = '_' + name - from pypy.module import cpyext w_obj = eval(expr) - if typ in ('PyObject*', 'PyTypeObject*'): + if typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): struct_ptr = make_ref(space, w_obj) elif typ == 'PyDateTime_CAPI*': continue diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -374,6 +374,11 @@ module = self.import_extension('foo', [ ("test_type", "METH_O", ''' + /* "args->ob_type" is a strange way to get at 'type', + which should have a different tp_getattro/tp_setattro + than its tp_base, which is 'object'. + */ + if (!args->ob_type->tp_setattro) { PyErr_SetString(PyExc_ValueError, "missing tp_setattro"); @@ -382,8 +387,12 @@ if (args->ob_type->tp_setattro == args->ob_type->tp_base->tp_setattro) { - PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); - return NULL; + /* Note that unlike CPython, in PyPy 'type.tp_setattro' + is the same function as 'object.tp_setattro'. This + test used to check that it was not, but that was an + artifact of the bootstrap logic only---in the final + C sources I checked and they are indeed the same. + So we ignore this problem here. */ } if (!args->ob_type->tp_getattro) { diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -146,7 +146,7 @@ assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) if not struct: - assert not space.config.translating + #assert not space.config.translating assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE if slot_names[0] == 'c_tp_as_number': STRUCT_TYPE = PyNumberMethods @@ -310,55 +310,6 @@ realize=type_realize, dealloc=type_dealloc) - # some types are difficult to create because of cycles. - # - object.ob_type = type - # - type.ob_type = type - # - tuple.ob_type = type - # - type.tp_base = object - # - tuple.tp_base = object - # - type.tp_bases is a tuple - # - object.tp_bases is a tuple - # - tuple.tp_bases is a tuple - - # insert null placeholders to please create_ref() - track_reference(space, lltype.nullptr(PyObject.TO), space.w_type) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_object) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_tuple) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_str) - - # create the objects - py_type = create_ref(space, space.w_type) - py_object = create_ref(space, space.w_object) - py_tuple = create_ref(space, space.w_tuple) - py_str = create_ref(space, space.w_str) - # XXX py_str is not initialized here correctly, because we are - # not tracking it, it gets an empty c_ob_type from py_basestring - - # form cycles - pto_type = rffi.cast(PyTypeObjectPtr, py_type) - py_type.c_ob_type = pto_type - py_object.c_ob_type = pto_type - py_tuple.c_ob_type = pto_type - - pto_object = rffi.cast(PyTypeObjectPtr, py_object) - pto_type.c_tp_base = pto_object - pto_tuple = rffi.cast(PyTypeObjectPtr, py_tuple) - pto_tuple.c_tp_base = pto_object - - pto_type.c_tp_bases.c_ob_type = pto_tuple - pto_object.c_tp_bases.c_ob_type = pto_tuple - pto_tuple.c_tp_bases.c_ob_type = pto_tuple - - for typ in (py_type, py_object, py_tuple, py_str): - heaptype = rffi.cast(PyHeapTypeObject, typ) - heaptype.c_ht_name.c_ob_type = pto_type - - # Restore the mapping - track_reference(space, py_type, space.w_type, replace=True) - track_reference(space, py_object, space.w_object, replace=True) - track_reference(space, py_tuple, space.w_tuple, replace=True) - track_reference(space, py_str, space.w_str, replace=True) - @cpython_api([PyObject], lltype.Void, external=False) def subtype_dealloc(space, obj): @@ -476,6 +427,8 @@ pto.c_tp_as_sequence = heaptype.c_as_sequence pto.c_tp_as_mapping = heaptype.c_as_mapping pto.c_tp_as_buffer = heaptype.c_as_buffer + pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out + pto.c_tp_itemsize = 0 return rffi.cast(PyObject, heaptype) @@ -511,8 +464,6 @@ pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: pto.c_tp_name = rffi.str2charp(w_type.name) - pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out - pto.c_tp_itemsize = 0 # uninitialized fields: # c_tp_print, c_tp_getattr, c_tp_setattr # XXX implement @@ -520,8 +471,11 @@ w_base = best_base(space, w_type.bases_w) pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base)) - finish_type_1(space, pto) - finish_type_2(space, pto, w_type) + if hasattr(space, '_cpyext_type_init'): + space._cpyext_type_init.append((pto, w_type)) + else: + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct) if pto.c_tp_base: From pypy.commits at gmail.com Wed Jan 27 13:03:01 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 10:03:01 -0800 (PST) Subject: [pypy-commit] pypy cpyext-bootstrap: Seems to work, merging Message-ID: <56a90655.6918c20a.2bb00.ffffc6f5@mx.google.com> Author: Armin Rigo Branch: cpyext-bootstrap Changeset: r81976:0fac859d23a0 Date: 2016-01-27 18:58 +0100 http://bitbucket.org/pypy/pypy/changeset/0fac859d23a0/ Log: Seems to work, merging From pypy.commits at gmail.com Wed Jan 27 13:12:05 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 10:12:05 -0800 (PST) Subject: [pypy-commit] pypy default: ignore branch Message-ID: <56a90875.01941c0a.adaea.3b7a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81978:5edae6a9f5e3 Date: 2016-01-27 19:10 +0100 http://bitbucket.org/pypy/pypy/changeset/5edae6a9f5e3/ Log: ignore branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -133,3 +133,4 @@ `rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen for traces containing a large number of pure getfield operations. +.. branch: cpyext-bootstrap From pypy.commits at gmail.com Wed Jan 27 13:13:09 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 10:13:09 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: hg merge default Message-ID: <56a908b5.a151c20a.c3de6.ffffcd35@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81979:1fc97a564c99 Date: 2016-01-27 19:12 +0100 http://bitbucket.org/pypy/pypy/changeset/1fc97a564c99/ Log: hg merge default diff too long, truncating to 2000 out of 4461 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -85,7 +85,8 @@ module_dependencies = { '_multiprocessing': [('objspace.usemodules.time', True), ('objspace.usemodules.thread', True)], - 'cpyext': [('objspace.usemodules.array', True)], + 'cpyext': [('objspace.usemodules.array', True), + ('objspace.usemodules.micronumpy', True)], 'cppyy': [('objspace.usemodules.cpyext', True)], } module_suggests = { diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -54,7 +54,8 @@ It is quite common nowadays that xyz is available on PyPI_ and installable with ``pip install xyz``. The simplest solution is to `use virtualenv (as documented here)`_. Then enter (activate) the virtualenv -and type: ``pip install xyz``. +and type: ``pip install xyz``. If you don't know or don't want virtualenv, +you can also install ``pip`` globally by saying ``pypy -m ensurepip``. If you get errors from the C compiler, the module is a CPython C Extension module using unsupported features. `See below.`_ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -123,3 +123,13 @@ .. branch: fix-cpython-ssl-tests-2.7 Fix SSL tests by importing cpython's patch + +.. branch: remove-getfield-pure + +Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant +optimizations instead consult the field descriptor to determine the purity of +the operation. Additionally, pure ``getfield`` operations are now handled +entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than +`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen +for traces containing a large number of pure getfield operations. + diff --git a/pypy/module/cpyext/Doc_stubgen_enable.patch b/pypy/module/cpyext/Doc_stubgen_enable.patch deleted file mode 100644 --- a/pypy/module/cpyext/Doc_stubgen_enable.patch +++ /dev/null @@ -1,27 +0,0 @@ -Index: Doc/tools/sphinx/ext/refcounting.py -=================================================================== ---- Doc/tools/sphinx/ext/refcounting.py (Revision 79453) -+++ Doc/tools/sphinx/ext/refcounting.py (Arbeitskopie) -@@ -91,6 +91,7 @@ - if app.config.refcount_file: - refcounts = Refcounts.fromfile( - path.join(app.srcdir, app.config.refcount_file)) -+ app._refcounts = refcounts - app.connect('doctree-read', refcounts.add_refcount_annotations) - - -Index: Doc/conf.py -=================================================================== ---- Doc/conf.py (Revision 79421) -+++ Doc/conf.py (Arbeitskopie) -@@ -13,8 +13,8 @@ - # General configuration - # --------------------- - --extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage', -- 'sphinx.ext.doctest', 'pyspecific'] -+extensions = ['pypy.module.cpyext.stubgen', 'sphinx.ext.refcounting', 'sphinx.ext.coverage', -+ 'sphinx.ext.doctest', 'pyspecific', ] - templates_path = ['tools/sphinxext'] - - # General substitutions. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -441,8 +441,8 @@ TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyObject*', 'space.w_False'), + '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'), + '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'), '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), @@ -505,7 +505,9 @@ def get_structtype_for_ctype(ctype): from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr from pypy.module.cpyext.cdatetime import PyDateTime_CAPI + from pypy.module.cpyext.intobject import PyIntObject return {"PyObject*": PyObject, "PyTypeObject*": PyTypeObjectPtr, + "PyIntObject*": PyIntObject, "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype] # Note: as a special case, "PyObject" is the pointer type in RPython, @@ -846,6 +848,7 @@ space.fromcache(State).install_dll(eci) # populate static data + builder = StaticObjectBuilder(space) for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext # for the eval() below w_obj = eval(expr) @@ -870,7 +873,7 @@ assert False, "Unknown static pointer: %s %s" % (typ, name) ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value - elif typ in ('PyObject*', 'PyTypeObject*'): + elif typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) @@ -879,17 +882,10 @@ # we have a structure, get its address in_dll = ll2ctypes.get_ctypes_type(PyObject.TO).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, ctypes.pointer(in_dll)) - from pypy.module.cpyext.pyobject import ( - track_reference, get_typedescr) - w_type = space.type(w_obj) - typedescr = get_typedescr(w_type.instancetypedef) - py_obj.c_ob_refcnt = 1 - py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, - make_ref(space, w_type)) - typedescr.attach(space, py_obj, w_obj) - track_reference(space, py_obj, w_obj) + builder.prepare(py_obj, w_obj) else: assert False, "Unknown static object: %s %s" % (typ, name) + builder.attach_all() pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') @@ -906,6 +902,36 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') + +class StaticObjectBuilder: + def __init__(self, space): + self.space = space + self.to_attach = [] + + def prepare(self, py_obj, w_obj): + from pypy.module.cpyext.pyobject import track_reference + py_obj.c_ob_refcnt = 1 + track_reference(self.space, py_obj, w_obj) + self.to_attach.append((py_obj, w_obj)) + + def attach_all(self): + from pypy.module.cpyext.pyobject import get_typedescr, make_ref + from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2 + space = self.space + space._cpyext_type_init = [] + for py_obj, w_obj in self.to_attach: + w_type = space.type(w_obj) + typedescr = get_typedescr(w_type.instancetypedef) + py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, + make_ref(space, w_type)) + typedescr.attach(space, py_obj, w_obj) + cpyext_type_init = space._cpyext_type_init + del space._cpyext_type_init + for pto, w_type in cpyext_type_init: + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) + + def mangle_name(prefix, name): if name.startswith('Py'): return prefix + name[2:] @@ -1100,14 +1126,33 @@ run_bootstrap_functions(space) setup_va_functions(eci) + from pypy.module import cpyext # for eval() below + + # Set up the types. Needs a special case, because of the + # immediate cycle involving 'c_ob_type', and because we don't + # want these types to be Py_TPFLAGS_HEAPTYPE. + static_types = {} + for name, (typ, expr) in GLOBALS.items(): + if typ == 'PyTypeObject*': + pto = lltype.malloc(PyTypeObject, immortal=True, + zero=True, flavor='raw') + pto.c_ob_refcnt = 1 + pto.c_tp_basicsize = -1 + static_types[name] = pto + builder = StaticObjectBuilder(space) + for name, pto in static_types.items(): + pto.c_ob_type = static_types['PyType_Type#'] + w_type = eval(GLOBALS[name][1]) + builder.prepare(rffi.cast(PyObject, pto), w_type) + builder.attach_all() + # populate static data for name, (typ, expr) in GLOBALS.iteritems(): name = name.replace("#", "") if name.startswith('PyExc_'): name = '_' + name - from pypy.module import cpyext w_obj = eval(expr) - if typ in ('PyObject*', 'PyTypeObject*'): + if typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): struct_ptr = make_ref(space, w_obj) elif typ == 'PyDateTime_CAPI*': continue diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -59,7 +59,7 @@ return None return borrow_from(w_dict, w_res) - at cpython_api([PyObject, rffi.CCHARP], rffi.INT_real, error=-1) + at cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) def PyDict_DelItemString(space, w_dict, key_ptr): """Remove the entry in dictionary p which has a key specified by the string key. Return 0 on success or -1 on failure.""" diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -128,7 +128,7 @@ filename = "" return run_string(space, source, filename, start, w_globals, w_locals) - at cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, + at cpython_api([CONST_STRING, rffi.INT_real, PyObject, PyObject, PyCompilerFlagsPtr], PyObject) def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr): """Execute Python source code from str in the context specified by the @@ -189,7 +189,7 @@ pi[0] = space.getindex_w(w_obj, None) return 1 - at cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr], + at cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real, PyCompilerFlagsPtr], PyObject) def Py_CompileStringFlags(space, source, filename, start, flagsptr): """Parse and compile the Python source code in str, returning the diff --git a/pypy/module/cpyext/patches/Doc_stubgen_enable.patch b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch @@ -0,0 +1,27 @@ +Index: Doc/tools/sphinx/ext/refcounting.py +=================================================================== +--- Doc/tools/sphinx/ext/refcounting.py (Revision 79453) ++++ Doc/tools/sphinx/ext/refcounting.py (Arbeitskopie) +@@ -91,6 +91,7 @@ + if app.config.refcount_file: + refcounts = Refcounts.fromfile( + path.join(app.srcdir, app.config.refcount_file)) ++ app._refcounts = refcounts + app.connect('doctree-read', refcounts.add_refcount_annotations) + + +Index: Doc/conf.py +=================================================================== +--- Doc/conf.py (Revision 79421) ++++ Doc/conf.py (Arbeitskopie) +@@ -13,8 +13,8 @@ + # General configuration + # --------------------- + +-extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage', +- 'sphinx.ext.doctest', 'pyspecific'] ++extensions = ['pypy.module.cpyext.stubgen', 'sphinx.ext.refcounting', 'sphinx.ext.coverage', ++ 'sphinx.ext.doctest', 'pyspecific', ] + templates_path = ['tools/sphinxext'] + + # General substitutions. diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -1,6 +1,6 @@ import errno from pypy.interpreter.error import OperationError -from pypy.module.cpyext.api import cpython_api +from pypy.module.cpyext.api import cpython_api, CONST_STRING from pypy.module.cpyext.pyobject import PyObject from rpython.rlib import rdtoa from rpython.rlib import rfloat @@ -22,7 +22,7 @@ rfloat.DIST_NAN: Py_DTST_NAN } - at cpython_api([rffi.CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) + at cpython_api([CONST_STRING, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) @jit.dont_look_inside # direct use of _get_errno() def PyOS_string_to_double(space, s, endptr, w_overflow_exception): """Convert a string s to a double, raising a Python diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -374,6 +374,11 @@ module = self.import_extension('foo', [ ("test_type", "METH_O", ''' + /* "args->ob_type" is a strange way to get at 'type', + which should have a different tp_getattro/tp_setattro + than its tp_base, which is 'object'. + */ + if (!args->ob_type->tp_setattro) { PyErr_SetString(PyExc_ValueError, "missing tp_setattro"); @@ -382,8 +387,12 @@ if (args->ob_type->tp_setattro == args->ob_type->tp_base->tp_setattro) { - PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); - return NULL; + /* Note that unlike CPython, in PyPy 'type.tp_setattro' + is the same function as 'object.tp_setattro'. This + test used to check that it was not, but that was an + artifact of the bootstrap logic only---in the final + C sources I checked and they are indeed the same. + So we ignore this problem here. */ } if (!args->ob_type->tp_getattro) { diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -146,7 +146,7 @@ assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) if not struct: - assert not space.config.translating + #assert not space.config.translating assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE if slot_names[0] == 'c_tp_as_number': STRUCT_TYPE = PyNumberMethods @@ -310,36 +310,6 @@ realize=type_realize, dealloc=type_dealloc) - # There is the obvious cycle of 'type(type) == type', but there are - # also several other ones, like 'tuple.tp_bases' being itself a - # tuple instance. We solve the first one by creating the type - # "type" manually here. For the other cycles, we fix them by delaying - # creation of the types here, and hoping nothing breaks by seeing - # uninitialized-yet types (only for a few basic types like 'type', - # 'tuple', 'object', 'str'). - space._cpyext_delay_type_creation = [] - - py_type = _type_alloc(space, lltype.nullptr(PyTypeObject)) - py_type.c_ob_type = rffi.cast(PyTypeObjectPtr, py_type) - track_reference(space, py_type, space.w_type) - type_attach(space, py_type, space.w_type) - - as_pyobj(space, space.w_str) - as_pyobj(space, space.w_tuple) - as_pyobj(space, space.w_object) - - delayed_types = [] - while space._cpyext_delay_type_creation: - (py_obj, w_type) = space._cpyext_delay_type_creation.pop() - _type_really_attach(space, py_obj, w_type) - delayed_types.append((py_obj, w_type)) - del space._cpyext_delay_type_creation - for py_obj, w_type in delayed_types: - pto = rffi.cast(PyTypeObjectPtr, py_obj) - finish_type_1(space, pto) - finish_type_2(space, pto, w_type) - finish_type_3(space, pto, w_type) - @cpython_api([PyObject], lltype.Void, external=False) def subtype_dealloc(space, obj): @@ -440,16 +410,13 @@ def type_alloc(space, w_metatype): - metatype = make_ref(space, w_metatype) - metatype = rffi.cast(PyTypeObjectPtr, metatype) - assert metatype + metatype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_metatype)) # Don't increase refcount for non-heaptypes - flags = rffi.cast(lltype.Signed, metatype.c_tp_flags) - if not flags & Py_TPFLAGS_HEAPTYPE: - Py_DecRef(space, w_metatype) - return _type_alloc(space, metatype) + if metatype: + flags = rffi.cast(lltype.Signed, metatype.c_tp_flags) + if not flags & Py_TPFLAGS_HEAPTYPE: + Py_DecRef(space, w_metatype) -def _type_alloc(space, metatype): heaptype = lltype.malloc(PyHeapTypeObject.TO, flavor='raw', zero=True) pto = heaptype.c_ht_type @@ -460,7 +427,6 @@ pto.c_tp_as_sequence = heaptype.c_as_sequence pto.c_tp_as_mapping = heaptype.c_as_mapping pto.c_tp_as_buffer = heaptype.c_as_buffer - pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out pto.c_tp_itemsize = 0 @@ -470,13 +436,6 @@ """ Fills a newly allocated PyTypeObject from an existing type. """ - if hasattr(space, '_cpyext_delay_type_creation'): - space._cpyext_delay_type_creation.append((py_obj, w_type)) - else: - _type_really_attach(space, py_obj, w_type) - return rffi.cast(PyTypeObjectPtr, py_obj) - -def _type_really_attach(space, py_obj, w_type): from pypy.module.cpyext.object import PyObject_Del assert isinstance(w_type, W_TypeObject) @@ -497,15 +456,24 @@ PyObject_Del.api_func.get_wrapper(space)) pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype, PyType_GenericAlloc.api_func.get_wrapper(space)) + if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: + w_typename = space.getattr(w_type, space.wrap('__name__')) + heaptype = rffi.cast(PyHeapTypeObject, pto) + heaptype.c_ht_name = make_ref(space, w_typename) + from pypy.module.cpyext.stringobject import PyString_AsString + pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) + else: + pto.c_tp_name = rffi.str2charp(w_type.name) # uninitialized fields: # c_tp_print, c_tp_getattr, c_tp_setattr # XXX implement # c_tp_compare and the following fields (see http://docs.python.org/c-api/typeobj.html ) w_base = best_base(space, w_type.bases_w) - py_base = make_ref(space, w_base) - pto.c_tp_base = rffi.cast(PyTypeObjectPtr, py_base) + pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base)) - if not hasattr(space, '_cpyext_delay_type_creation'): + if hasattr(space, '_cpyext_type_init'): + space._cpyext_type_init.append((pto, w_type)) + else: finish_type_1(space, pto) finish_type_2(space, pto, w_type) @@ -519,21 +487,8 @@ if space.is_w(w_type, space.w_object): pto.c_tp_new = rffi.cast(newfunc, 1) update_all_slots(space, w_type, pto) - - if not hasattr(space, '_cpyext_delay_type_creation'): - finish_type_3(space, pto, w_type) - pto.c_tp_flags |= Py_TPFLAGS_READY - -def finish_type_3(space, pto, w_type): - if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: - w_typename = space.getattr(w_type, space.wrap('__name__')) - heaptype = rffi.cast(PyHeapTypeObject, pto) - heaptype.c_ht_name = make_ref(space, w_typename) - from pypy.module.cpyext.stringobject import PyString_AsString - pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) - else: - pto.c_tp_name = rffi.str2charp(w_type.name) + return pto def py_type_ready(space, pto): if pto.c_tp_flags & Py_TPFLAGS_READY: diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -83,9 +83,9 @@ p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() - p41 = getfield_gc_pure_r(p38, descr=) + p41 = getfield_gc_r(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) - i42 = getfield_gc_pure_i(p38, descr=) + i42 = getfield_gc_i(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -435,21 +435,21 @@ guard_isnull(p5, descr=...) guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) - i22 = getfield_gc_pure_i(p12, descr=) + i22 = getfield_gc_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() - p32 = getfield_gc_pure_r(p29, descr=) + p32 = getfield_gc_r(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc_pure_i(p29, descr=) + i34 = getfield_gc_i(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) p37 = getfield_gc_r(ConstPtr(ptr36), descr=) guard_nonnull_class(p37, ConstClass(W_IntObject), descr=...) - i39 = getfield_gc_pure_i(p37, descr=) + i39 = getfield_gc_i(p37, descr=) i40 = int_add_ovf(i22, i39) guard_no_overflow(descr=...) --TICK-- @@ -466,7 +466,7 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure_i(p6, descr=) + i8 = getfield_gc_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) guard_not_invalidated? diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -84,7 +84,7 @@ guard_no_exception(descr=...) p20 = new_with_vtable(descr=...) call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) @@ -93,7 +93,7 @@ p28 = getfield_gc_r(p13, descr=) p29 = getinteriorfield_gc_r(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure_i(p29, descr=) + i31 = getfield_gc_i(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -101,13 +101,13 @@ loop = log._filter(log.loops[0]) assert loop.match(""" guard_class(p1, #, descr=...) - p4 = getfield_gc_pure_r(p1, descr=) + p4 = getfield_gc_r(p1, descr=) i5 = getfield_gc_i(p0, descr=) - p6 = getfield_gc_pure_r(p4, descr=) - p7 = getfield_gc_pure_r(p6, descr=) + p6 = getfield_gc_r(p4, descr=) + p7 = getfield_gc_r(p6, descr=) guard_class(p7, ConstClass(Float64), descr=...) - i9 = getfield_gc_pure_i(p4, descr=) - i10 = getfield_gc_pure_i(p6, descr=) + i9 = getfield_gc_i(p4, descr=) + i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) i14 = int_eq(i10, 60) i15 = int_or(i12, i14) @@ -117,28 +117,28 @@ i18 = float_ne(f16, 0.000000) guard_true(i18, descr=...) guard_nonnull_class(p2, ConstClass(W_BoolBox), descr=...) - i20 = getfield_gc_pure_i(p2, descr=) + i20 = getfield_gc_i(p2, descr=) i21 = int_is_true(i20) guard_false(i21, descr=...) i22 = getfield_gc_i(p0, descr=) - i23 = getfield_gc_pure_i(p1, descr=) + i23 = getfield_gc_i(p1, descr=) guard_true(i23, descr=...) i25 = int_add(i22, 1) - p26 = getfield_gc_pure_r(p0, descr=) - i27 = getfield_gc_pure_i(p1, descr=) + p26 = getfield_gc_r(p0, descr=) + i27 = getfield_gc_i(p1, descr=) i28 = int_is_true(i27) guard_true(i28, descr=...) - i29 = getfield_gc_pure_i(p6, descr=) + i29 = getfield_gc_i(p6, descr=) guard_value(i29, 8, descr=...) i30 = int_add(i5, 8) - i31 = getfield_gc_pure_i(p1, descr=) + i31 = getfield_gc_i(p1, descr=) i32 = int_ge(i25, i31) guard_false(i32, descr=...) p34 = new_with_vtable(descr=...) {{{ - setfield_gc(p34, p1, descr=) + setfield_gc(p34, p1, descr=) setfield_gc(p34, i25, descr=) - setfield_gc(p34, p26, descr=) + setfield_gc(p34, p26, descr=) setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -54,7 +54,7 @@ i19 = int_add(i11, 1) setfield_gc(p2, i19, descr=...) guard_nonnull_class(p18, ConstClass(W_IntObject), descr=...) - i20 = getfield_gc_pure_i(p18, descr=...) + i20 = getfield_gc_i(p18, descr=...) i21 = int_gt(i20, i14) guard_true(i21, descr=...) jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -113,7 +113,7 @@ i12 = int_is_true(i4) guard_true(i12, descr=...) guard_not_invalidated(descr=...) - i10p = getfield_gc_pure_i(p10, descr=...) + i10p = getfield_gc_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -82,7 +82,7 @@ strsetitem(p25, 0, i23) p93 = call_r(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) - i95 = getfield_gc_pure_i(p93, descr=) + i95 = getfield_gc_i(p93, descr=) i96 = int_gt(i95, #) guard_false(i96, descr=...) i94 = call_i(ConstClass(rbigint._toint_helper), p93, descr=) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -30,10 +30,10 @@ assert isinstance(terminator, Terminator) self.terminator = terminator - def read(self, obj, selector): - attr = self.find_map_attr(selector) + def read(self, obj, name, index): + attr = self.find_map_attr(name, index) if attr is None: - return self.terminator._read_terminator(obj, selector) + return self.terminator._read_terminator(obj, name, index) if ( jit.isconstant(attr.storageindex) and jit.isconstant(obj) and @@ -47,39 +47,39 @@ def _pure_mapdict_read_storage(self, obj, storageindex): return obj._mapdict_read_storage(storageindex) - def write(self, obj, selector, w_value): - attr = self.find_map_attr(selector) + def write(self, obj, name, index, w_value): + attr = self.find_map_attr(name, index) if attr is None: - return self.terminator._write_terminator(obj, selector, w_value) + return self.terminator._write_terminator(obj, name, index, w_value) if not attr.ever_mutated: attr.ever_mutated = True obj._mapdict_write_storage(attr.storageindex, w_value) return True - def delete(self, obj, selector): + def delete(self, obj, name, index): pass - def find_map_attr(self, selector): + def find_map_attr(self, name, index): if jit.we_are_jitted(): # hack for the jit: # the _find_map_attr method is pure too, but its argument is never # constant, because it is always a new tuple - return self._find_map_attr_jit_pure(selector[0], selector[1]) + return self._find_map_attr_jit_pure(name, index) else: - return self._find_map_attr_indirection(selector) + return self._find_map_attr_indirection(name, index) @jit.elidable def _find_map_attr_jit_pure(self, name, index): - return self._find_map_attr_indirection((name, index)) + return self._find_map_attr_indirection(name, index) @jit.dont_look_inside - def _find_map_attr_indirection(self, selector): + def _find_map_attr_indirection(self, name, index): if (self.space.config.objspace.std.withmethodcache): - return self._find_map_attr_cache(selector) - return self._find_map_attr(selector) + return self._find_map_attr_cache(name, index) + return self._find_map_attr(name, index) @jit.dont_look_inside - def _find_map_attr_cache(self, selector): + def _find_map_attr_cache(self, name, index): space = self.space cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp @@ -87,31 +87,36 @@ attrs_as_int = objectmodel.current_object_addr_as_int(self) # ^^^Note: see comment in typeobject.py for # _pure_lookup_where_with_method_cache() - hash_selector = objectmodel.compute_hash(selector) + + # unrolled hash computation for 2-tuple + c1 = 0x345678 + c2 = 1000003 + hash_name = objectmodel.compute_hash(name) + hash_selector = intmask((c2 * ((c2 * c1) ^ hash_name)) ^ index) product = intmask(attrs_as_int * hash_selector) attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too cached_attr = cache.attrs[attr_hash] if cached_attr is self: - cached_selector = cache.selectors[attr_hash] - if cached_selector == selector: + cached_name = cache.names[attr_hash] + cached_index = cache.indexes[attr_hash] + if cached_name == name and cached_index == index: attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: - name = selector[0] cache.hits[name] = cache.hits.get(name, 0) + 1 return attr - attr = self._find_map_attr(selector) + attr = self._find_map_attr(name, index) cache.attrs[attr_hash] = self - cache.selectors[attr_hash] = selector + cache.names[attr_hash] = name + cache.indexes[attr_hash] = index cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: - name = selector[0] cache.misses[name] = cache.misses.get(name, 0) + 1 return attr - def _find_map_attr(self, selector): + def _find_map_attr(self, name, index): while isinstance(self, PlainAttribute): - if selector == self.selector: + if name == self.name and index == self.index: return self self = self.back return None @@ -137,23 +142,22 @@ @jit.elidable def _get_new_attr(self, name, index): - selector = name, index cache = self.cache_attrs if cache is None: cache = self.cache_attrs = {} - attr = cache.get(selector, None) + attr = cache.get((name, index), None) if attr is None: - attr = PlainAttribute(selector, self) - cache[selector] = attr + attr = PlainAttribute(name, index, self) + cache[name, index] = attr return attr - @jit.look_inside_iff(lambda self, obj, selector, w_value: + @jit.look_inside_iff(lambda self, obj, name, index, w_value: jit.isconstant(self) and - jit.isconstant(selector[0]) and - jit.isconstant(selector[1])) - def add_attr(self, obj, selector, w_value): + jit.isconstant(name) and + jit.isconstant(index)) + def add_attr(self, obj, name, index, w_value): # grumble, jit needs this - attr = self._get_new_attr(selector[0], selector[1]) + attr = self._get_new_attr(name, index) oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): size_est = (oldattr._size_estimate + attr.size_estimate() @@ -189,11 +193,11 @@ AbstractAttribute.__init__(self, space, self) self.w_cls = w_cls - def _read_terminator(self, obj, selector): + def _read_terminator(self, obj, name, index): return None - def _write_terminator(self, obj, selector, w_value): - obj._get_mapdict_map().add_attr(obj, selector, w_value) + def _write_terminator(self, obj, name, index, w_value): + obj._get_mapdict_map().add_attr(obj, name, index, w_value) return True def copy(self, obj): @@ -231,40 +235,40 @@ class NoDictTerminator(Terminator): - def _write_terminator(self, obj, selector, w_value): - if selector[1] == DICT: + def _write_terminator(self, obj, name, index, w_value): + if index == DICT: return False - return Terminator._write_terminator(self, obj, selector, w_value) + return Terminator._write_terminator(self, obj, name, index, w_value) class DevolvedDictTerminator(Terminator): - def _read_terminator(self, obj, selector): - if selector[1] == DICT: + def _read_terminator(self, obj, name, index): + if index == DICT: space = self.space w_dict = obj.getdict(space) - return space.finditem_str(w_dict, selector[0]) - return Terminator._read_terminator(self, obj, selector) + return space.finditem_str(w_dict, name) + return Terminator._read_terminator(self, obj, name, index) - def _write_terminator(self, obj, selector, w_value): - if selector[1] == DICT: + def _write_terminator(self, obj, name, index, w_value): + if index == DICT: space = self.space w_dict = obj.getdict(space) - space.setitem_str(w_dict, selector[0], w_value) + space.setitem_str(w_dict, name, w_value) return True - return Terminator._write_terminator(self, obj, selector, w_value) + return Terminator._write_terminator(self, obj, name, index, w_value) - def delete(self, obj, selector): + def delete(self, obj, name, index): from pypy.interpreter.error import OperationError - if selector[1] == DICT: + if index == DICT: space = self.space w_dict = obj.getdict(space) try: - space.delitem(w_dict, space.wrap(selector[0])) + space.delitem(w_dict, space.wrap(name)) except OperationError, ex: if not ex.match(space, space.w_KeyError): raise return Terminator.copy(self, obj) - return Terminator.delete(self, obj, selector) + return Terminator.delete(self, obj, name, index) def remove_dict_entries(self, obj): assert 0, "should be unreachable" @@ -276,27 +280,28 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'storageindex', 'back', 'ever_mutated?'] + _immutable_fields_ = ['name', 'index', 'storageindex', 'back', 'ever_mutated?'] - def __init__(self, selector, back): + def __init__(self, name, index, back): AbstractAttribute.__init__(self, back.space, back.terminator) - self.selector = selector + self.name = name + self.index = index self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False def _copy_attr(self, obj, new_obj): - w_value = self.read(obj, self.selector) - new_obj._get_mapdict_map().add_attr(new_obj, self.selector, w_value) + w_value = self.read(obj, self.name, self.index) + new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.index, w_value) - def delete(self, obj, selector): - if selector == self.selector: + def delete(self, obj, name, index): + if name == self.name and index == self.index: # ok, attribute is deleted if not self.ever_mutated: self.ever_mutated = True return self.back.copy(obj) - new_obj = self.back.delete(obj, selector) + new_obj = self.back.delete(obj, name, index) if new_obj is not None: self._copy_attr(obj, new_obj) return new_obj @@ -315,14 +320,14 @@ return new_obj def search(self, attrtype): - if self.selector[1] == attrtype: + if self.index == attrtype: return self return self.back.search(attrtype) def materialize_r_dict(self, space, obj, dict_w): new_obj = self.back.materialize_r_dict(space, obj, dict_w) - if self.selector[1] == DICT: - w_attr = space.wrap(self.selector[0]) + if self.index == DICT: + w_attr = space.wrap(self.name) dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) else: self._copy_attr(obj, new_obj) @@ -330,12 +335,12 @@ def remove_dict_entries(self, obj): new_obj = self.back.remove_dict_entries(obj) - if self.selector[1] != DICT: + if self.index != DICT: self._copy_attr(obj, new_obj) return new_obj def __repr__(self): - return "" % (self.selector, self.storageindex, self.back) + return "" % (self.name, self.index, self.storageindex, self.back) def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to @@ -347,8 +352,8 @@ assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE - self._empty_selector = (None, INVALID) - self.selectors = [self._empty_selector] * SIZE + self.names = [None] * SIZE + self.indexes = [INVALID] * SIZE self.cached_attrs = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} @@ -357,8 +362,9 @@ def clear(self): for i in range(len(self.attrs)): self.attrs[i] = None - for i in range(len(self.selectors)): - self.selectors[i] = self._empty_selector + for i in range(len(self.names)): + self.names[i] = None + self.indexes[i] = INVALID for i in range(len(self.cached_attrs)): self.cached_attrs[i] = None @@ -388,20 +394,20 @@ # objspace interface def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, (attrname, DICT)) + return self._get_mapdict_map().read(self, attrname, DICT) def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, (attrname, DICT), w_value) + return self._get_mapdict_map().write(self, attrname, DICT, w_value) def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, (attrname, DICT)) + new_obj = self._get_mapdict_map().delete(self, attrname, DICT) if new_obj is None: return False self._become(new_obj) return True def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, ("dict", SPECIAL)) + w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) if w_dict is not None: assert isinstance(w_dict, W_DictMultiObject) return w_dict @@ -409,7 +415,7 @@ strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) assert flag return w_dict @@ -425,7 +431,7 @@ # shell that continues to delegate to 'self'. if type(w_olddict.get_strategy()) is MapDictStrategy: w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) assert flag def getclass(self, space): @@ -443,16 +449,16 @@ self._init_empty(w_subtype.terminator) def getslotvalue(self, slotindex): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - return self._get_mapdict_map().read(self, key) + index = SLOTS_STARTING_FROM + slotindex + return self._get_mapdict_map().read(self, "slot", index) def setslotvalue(self, slotindex, w_value): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - self._get_mapdict_map().write(self, key, w_value) + index = SLOTS_STARTING_FROM + slotindex + self._get_mapdict_map().write(self, "slot", index, w_value) def delslotvalue(self, slotindex): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - new_obj = self._get_mapdict_map().delete(self, key) + index = SLOTS_STARTING_FROM + slotindex + new_obj = self._get_mapdict_map().delete(self, "slot", index) if new_obj is None: return False self._become(new_obj) @@ -462,7 +468,7 @@ def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline - lifeline = self._get_mapdict_map().read(self, ("weakref", SPECIAL)) + lifeline = self._get_mapdict_map().read(self, "weakref", SPECIAL) if lifeline is None: return None assert isinstance(lifeline, WeakrefLifeline) @@ -472,11 +478,11 @@ def setweakref(self, space, weakreflifeline): from pypy.module._weakref.interp__weakref import WeakrefLifeline assert isinstance(weakreflifeline, WeakrefLifeline) - self._get_mapdict_map().write(self, ("weakref", SPECIAL), weakreflifeline) + self._get_mapdict_map().write(self, "weakref", SPECIAL, weakreflifeline) setweakref._cannot_really_call_random_things_ = True def delweakref(self): - self._get_mapdict_map().write(self, ("weakref", SPECIAL), None) + self._get_mapdict_map().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True class ObjectMixin(object): @@ -721,7 +727,7 @@ curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT) if curr is None: raise KeyError - key = curr.selector[0] + key = curr.name w_value = self.getitem_str(w_dict, key) w_key = self.space.wrap(key) self.delitem(w_dict, w_key) @@ -758,7 +764,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name w_attr = self.space.wrap(attr) return w_attr return None @@ -780,7 +786,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name return self.w_obj.getdictvalue(self.space, attr) return None @@ -801,7 +807,7 @@ curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name w_attr = self.space.wrap(attr) return w_attr, self.w_obj.getdictvalue(self.space, attr) return None, None @@ -884,9 +890,9 @@ _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # - selector = ("", INVALID) + attrname, index = ("", INVALID) if w_descr is None: - selector = (name, DICT) # common case: no such attr in the class + attrname, index = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, MutableCell): pass # we have a MutableCell in the class: give up elif space.is_data_descr(w_descr): @@ -894,20 +900,21 @@ # (if any) has no relevance. from pypy.interpreter.typedef import Member if isinstance(w_descr, Member): # it is a slot -- easy case - selector = ("slot", SLOTS_STARTING_FROM + w_descr.index) + attrname, index = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. If there is # also a dict attribute, use the latter, caching its storageindex. # If not, we loose. We could do better in this case too, # but we don't care too much; the common case of a method # invocation is handled by LOOKUP_METHOD_xxx below. - selector = (name, DICT) + attrname = name + index = DICT # - if selector[1] != INVALID: - attr = map.find_map_attr(selector) + if index != INVALID: + attr = map.find_map_attr(attrname, index) if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator, - # map.find_map_attr will always return None if selector[1]==DICT. + # map.find_map_attr will always return None if index==DICT. _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) return w_obj._mapdict_read_storage(attr.storageindex) if space.config.objspace.std.withmethodcachecounter: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -34,8 +34,8 @@ def test_plain_attribute(): w_cls = "class" - aa = PlainAttribute(("b", DICT), - PlainAttribute(("a", DICT), + aa = PlainAttribute("b", DICT, + PlainAttribute("a", DICT, Terminator(space, w_cls))) assert aa.space is space assert aa.terminator.w_cls is w_cls @@ -63,16 +63,16 @@ def test_huge_chain(): current = Terminator(space, "cls") for i in range(20000): - current = PlainAttribute((str(i), DICT), current) - assert current.find_map_attr(("0", DICT)).storageindex == 0 + current = PlainAttribute(str(i), DICT, current) + assert current.find_map_attr("0", DICT).storageindex == 0 def test_search(): - aa = PlainAttribute(("b", DICT), PlainAttribute(("a", DICT), Terminator(None, None))) + aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(None, None))) assert aa.search(DICT) is aa assert aa.search(SLOTS_STARTING_FROM) is None assert aa.search(SPECIAL) is None - bb = PlainAttribute(("C", SPECIAL), PlainAttribute(("A", SLOTS_STARTING_FROM), aa)) + bb = PlainAttribute("C", SPECIAL, PlainAttribute("A", SLOTS_STARTING_FROM, aa)) assert bb.search(DICT) is aa assert bb.search(SLOTS_STARTING_FROM) is bb.back assert bb.search(SPECIAL) is bb @@ -320,7 +320,7 @@ d = {} w_d = FakeDict(d) - flag = obj.map.write(obj, ("dict", SPECIAL), w_d) + flag = obj.map.write(obj, "dict", SPECIAL, w_d) assert flag materialize_r_dict(space, obj, d) assert d == {"a": 5, "b": 6, "c": 7} diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -152,7 +152,7 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) self.index = heaptracker.get_fielddescr_index_in(S, fieldname) - self._is_pure = S._immutable_field(fieldname) + self._is_pure = S._immutable_field(fieldname) != False def is_always_pure(self): return self._is_pure @@ -608,9 +608,6 @@ p = support.cast_arg(lltype.Ptr(descr.S), p) return support.cast_result(descr.FIELD, getattr(p, descr.fieldname)) - bh_getfield_gc_pure_i = bh_getfield_gc - bh_getfield_gc_pure_r = bh_getfield_gc - bh_getfield_gc_pure_f = bh_getfield_gc bh_getfield_gc_i = bh_getfield_gc bh_getfield_gc_r = bh_getfield_gc bh_getfield_gc_f = bh_getfield_gc diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -180,7 +180,8 @@ return self.offset def repr_of_descr(self): - return '' % (self.flag, self.name, self.offset) + ispure = " pure" if self._is_pure else "" + return '' % (self.flag, self.name, self.offset, ispure) def get_parent_descr(self): return self.parent_descr @@ -200,7 +201,7 @@ flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) index_in_parent = heaptracker.get_fielddescr_index_in(STRUCT, fieldname) - is_pure = bool(STRUCT._immutable_field(fieldname)) + is_pure = STRUCT._immutable_field(fieldname) != False fielddescr = FieldDescr(name, offset, size, flag, index_in_parent, is_pure) cachedict = cache.setdefault(STRUCT, {}) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -234,7 +234,6 @@ self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, fieldsize, itemsize, ofs) elif op.getopnum() in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, - rop.GETFIELD_GC_PURE_I, rop.GETFIELD_GC_PURE_F, rop.GETFIELD_GC_PURE_R, rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1477,9 +1477,6 @@ genop_getfield_gc_f = _genop_getfield genop_getfield_raw_i = _genop_getfield genop_getfield_raw_f = _genop_getfield - genop_getfield_gc_pure_i = _genop_getfield - genop_getfield_gc_pure_r = _genop_getfield - genop_getfield_gc_pure_f = _genop_getfield def _genop_gc_load(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc, sign_loc = arglocs diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -168,9 +168,6 @@ elif (opnum != rop.GETFIELD_GC_R and opnum != rop.GETFIELD_GC_I and opnum != rop.GETFIELD_GC_F and - opnum != rop.GETFIELD_GC_PURE_R and - opnum != rop.GETFIELD_GC_PURE_I and - opnum != rop.GETFIELD_GC_PURE_F and opnum != rop.PTR_EQ and opnum != rop.PTR_NE and opnum != rop.INSTANCE_PTR_EQ and diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -816,9 +816,6 @@ if 'getfield_gc' in check: assert check.pop('getfield_gc') == 0 check['getfield_gc_i'] = check['getfield_gc_r'] = check['getfield_gc_f'] = 0 - if 'getfield_gc_pure' in check: - assert check.pop('getfield_gc_pure') == 0 - check['getfield_gc_pure_i'] = check['getfield_gc_pure_r'] = check['getfield_gc_pure_f'] = 0 if 'getarrayitem_gc_pure' in check: assert check.pop('getarrayitem_gc_pure') == 0 check['getarrayitem_gc_pure_i'] = check['getarrayitem_gc_pure_r'] = check['getarrayitem_gc_pure_f'] = 0 diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -183,6 +183,8 @@ return res def invalidate(self, descr): + if descr.is_always_pure(): + return for opinfo in self.cached_infos: assert isinstance(opinfo, info.AbstractStructPtrInfo) opinfo._fields[descr.get_index()] = None @@ -515,9 +517,14 @@ return pendingfields def optimize_GETFIELD_GC_I(self, op): + descr = op.getdescr() + if descr.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None: + resbox = self.optimizer.constant_fold(op) + self.optimizer.make_constant(op, resbox) + return structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) + cf = self.field_cache(descr) + field = cf.getfield_from_cache(self, structinfo, descr) if field is not None: self.make_equal_to(op, field) return @@ -525,23 +532,10 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self, cf=cf) + structinfo.setfield(descr, op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I - def optimize_SETFIELD_GC(self, op): self.setfield(op) #opnum = OpHelpers.getfield_pure_for_descr(op.getdescr()) @@ -631,12 +625,12 @@ def optimize_QUASIIMMUT_FIELD(self, op): # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC_PURE(s, descr='inst_x') + # x = GETFIELD_GC(s, descr='inst_x') # pure # If 's' is a constant (after optimizations) we rely on the rest of the - # optimizations to constant-fold the following getfield_gc_pure. + # optimizations to constant-fold the following pure getfield_gc. # in addition, we record the dependency here to make invalidation work # correctly. - # NB: emitting the GETFIELD_GC_PURE is only safe because the + # NB: emitting the pure GETFIELD_GC is only safe because the # QUASIIMMUT_FIELD is also emitted to make sure the dependency is # registered. structvalue = self.ensure_ptr_info_arg0(op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -10,6 +10,7 @@ from rpython.jit.metainterp.typesystem import llhelper from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.debug import debug_print +from rpython.jit.metainterp.optimize import SpeculativeError @@ -374,6 +375,7 @@ if (box.type == 'i' and box.get_forwarded() and box.get_forwarded().is_constant()): return ConstInt(box.get_forwarded().getint()) + return None #self.ensure_imported(value) def get_newoperations(self): @@ -736,12 +738,64 @@ self.emit_operation(op) def constant_fold(self, op): + self.protect_speculative_operation(op) argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] return execute_nonspec_const(self.cpu, None, op.getopnum(), argboxes, op.getdescr(), op.type) + def protect_speculative_operation(self, op): + """When constant-folding a pure operation that reads memory from + a gcref, make sure that the gcref is non-null and of a valid type. + Otherwise, raise SpeculativeError. This should only occur when + unrolling and optimizing the unrolled loop. Note that if + cpu.supports_guard_gc_type is false, we can't really do this + check at all, but then we don't unroll in that case. + """ + opnum = op.getopnum() + cpu = self.cpu + + if OpHelpers.is_pure_getfield(opnum, op.getdescr()): + fielddescr = op.getdescr() + ref = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_field(ref, fielddescr) + return + + elif (opnum == rop.GETARRAYITEM_GC_PURE_I or + opnum == rop.GETARRAYITEM_GC_PURE_R or + opnum == rop.GETARRAYITEM_GC_PURE_F or + opnum == rop.ARRAYLEN_GC): + arraydescr = op.getdescr() + array = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_array(array, arraydescr) + if opnum == rop.ARRAYLEN_GC: + return + arraylength = cpu.bh_arraylen_gc(array, arraydescr) + + elif (opnum == rop.STRGETITEM or + opnum == rop.STRLEN): + string = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_string(string) + if opnum == rop.STRLEN: + return + arraylength = cpu.bh_strlen(string) + + elif (opnum == rop.UNICODEGETITEM or + opnum == rop.UNICODELEN): + unicode = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_unicode(unicode) + if opnum == rop.UNICODELEN: + return + arraylength = cpu.bh_unicodelen(unicode) + + else: + return + + index = self.get_constant_box(op.getarg(1)).getint() + if not (0 <= index < arraylength): + raise SpeculativeError + def is_virtual(self, op): if op.type == 'r': opinfo = self.getptrinfo(op) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -94,7 +94,6 @@ break else: # all constant arguments: constant-fold away - self.protect_speculative_operation(op) resbox = self.optimizer.constant_fold(op) # note that INT_xxx_OVF is not done from here, and the # overflows in the INT_xxx operations are ignored @@ -119,59 +118,6 @@ if nextop: self.emit_operation(nextop) - def protect_speculative_operation(self, op): - """When constant-folding a pure operation that reads memory from - a gcref, make sure that the gcref is non-null and of a valid type. - Otherwise, raise SpeculativeError. This should only occur when - unrolling and optimizing the unrolled loop. Note that if - cpu.supports_guard_gc_type is false, we can't really do this - check at all, but then we don't unroll in that case. - """ - opnum = op.getopnum() - cpu = self.optimizer.cpu - - if (opnum == rop.GETFIELD_GC_PURE_I or - opnum == rop.GETFIELD_GC_PURE_R or - opnum == rop.GETFIELD_GC_PURE_F): - fielddescr = op.getdescr() - ref = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_field(ref, fielddescr) - return - - elif (opnum == rop.GETARRAYITEM_GC_PURE_I or - opnum == rop.GETARRAYITEM_GC_PURE_R or - opnum == rop.GETARRAYITEM_GC_PURE_F or - opnum == rop.ARRAYLEN_GC): - arraydescr = op.getdescr() - array = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_array(array, arraydescr) - if opnum == rop.ARRAYLEN_GC: - return - arraylength = cpu.bh_arraylen_gc(array, arraydescr) - - elif (opnum == rop.STRGETITEM or - opnum == rop.STRLEN): - string = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_string(string) - if opnum == rop.STRLEN: - return - arraylength = cpu.bh_strlen(string) - - elif (opnum == rop.UNICODEGETITEM or - opnum == rop.UNICODELEN): - unicode = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_unicode(unicode) - if opnum == rop.UNICODELEN: - return - arraylength = cpu.bh_unicodelen(unicode) - - else: - return - - index = self.get_constant_box(op.getarg(1)).getint() - if not (0 <= index < arraylength): - raise SpeculativeError - def getrecentops(self, opnum): if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: opnum = opnum - rop._OVF_FIRST diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -521,8 +521,8 @@ def test_getfield(self): graph = self.build_dependency(""" [p0, p1] # 0: 1,2,5 - p2 = getfield_gc_r(p0) # 1: 3,5 - p3 = getfield_gc_r(p0) # 2: 4 + p2 = getfield_gc_r(p0, descr=valuedescr) # 1: 3,5 + p3 = getfield_gc_r(p0, descr=valuedescr) # 2: 4 guard_nonnull(p2) [p2] # 3: 4,5 guard_nonnull(p3) [p3] # 4: 5 jump(p0,p2) # 5: @@ -532,10 +532,10 @@ def test_cyclic(self): graph = self.build_dependency(""" [p0, p1, p5, p6, p7, p9, p11, p12] # 0: 1,6 - p13 = getfield_gc_r(p9) # 1: 2,5 + p13 = getfield_gc_r(p9, descr=valuedescr) # 1: 2,5 guard_nonnull(p13) [] # 2: 4,5 - i14 = getfield_gc_i(p9) # 3: 5 - p15 = getfield_gc_r(p13) # 4: 5 + i14 = getfield_gc_i(p9, descr=valuedescr) # 3: 5 + p15 = getfield_gc_r(p13, descr=valuedescr) # 4: 5 guard_class(p15, 14073732) [p1, p0, p9, i14, p15, p13, p5, p6, p7] # 5: 6 jump(p0,p1,p5,p6,p7,p9,p11,p12) # 6: """) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -955,12 +955,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_getfield_gc_1(self): ops = """ [i] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + p1 = new_with_vtable(descr=nodesize3) + setfield_gc(p1, i, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) jump(i1) """ expected = """ @@ -969,17 +969,16 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ [i] - jump(5) - """ - self.node.value = 5 + jump(7) + """ self.optimize_loop(ops, expected) def test_getfield_gc_nonpure_2(self): @@ -1343,7 +1342,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -1355,7 +1354,7 @@ expected = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -1597,7 +1596,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -1608,7 +1607,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -5076,7 +5075,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i0 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i0 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i1 = call_pure_i(123, i0, descr=nonwritedescr) finish(i1) """ @@ -5462,15 +5461,15 @@ def test_getarrayitem_gc_pure_not_invalidated(self): ops = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) - i2 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i2 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(i2) jump(p0) """ expected = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) escape_n(i1) jump(p0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1409,12 +1409,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_pure_getfield_gc_1(self): ops = """ [i] p1 = new_with_vtable(descr=nodesize) setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + i1 = getfield_gc_i(p1, descr=valuedescr) jump(i1) """ expected = """ @@ -1423,10 +1423,10 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_pure_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ @@ -1436,20 +1436,20 @@ self.node.value = 5 self.optimize_loop(ops, expected) - def test_getfield_gc_pure_3(self): + def test_pure_getfield_gc_3(self): ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) - p3 = getfield_gc_pure_r(p1, descr=nextdescr) + p3 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p3) jump() """ expected = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) escape_n(p2) jump() @@ -2319,7 +2319,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -2332,7 +2332,7 @@ preamble = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2340,11 +2340,12 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - jump(p1, i1, i2, p3, i3) - """ - expected = """ - [p1, i1, i2, p3, i3] + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] # + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2352,8 +2353,7 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - ifoo = arraylen_gc(p3, descr=arraydescr) # killed by the backend - jump(p1, i1, i2, p3, i3) + jump(p1, i1, i2, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2669,7 +2669,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -2680,7 +2680,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -3302,8 +3302,8 @@ [p8, p11, i24] p26 = new(descr=ssize) setfield_gc(p26, i24, descr=adescr) - i34 = getfield_gc_pure_i(p11, descr=abisdescr) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=abisdescr) + i35 = getfield_gc_i(p26, descr=adescr) i36 = int_add_ovf(i34, i35) guard_no_overflow() [] jump(p8, p11, i35) @@ -3330,8 +3330,8 @@ setfield_gc(p26, i24, descr=adescr) i28 = int_add(i17, 1) setfield_gc(p8, i28, descr=valuedescr) - i34 = getfield_gc_pure_i(p11, descr=valuedescr3) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=valuedescr3) + i35 = getfield_gc_i(p26, descr=adescr) guard_nonnull(p12) [] i36 = int_add_ovf(i34, i35) guard_no_overflow() [] @@ -3522,14 +3522,14 @@ def test_residual_call_does_not_invalidate_immutable_caches(self): ops = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) - i3 = getfield_gc_pure_i(p1, descr=valuedescr3) + i3 = getfield_gc_i(p1, descr=valuedescr3) jump(p1) """ expected_preamble = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) jump(p1, i1) """ @@ -4878,11 +4878,11 @@ def test_add_sub_ovf_virtual_unroll(self): ops = """ [p15] - i886 = getfield_gc_pure_i(p15, descr=valuedescr) + i886 = getfield_gc_i(p15, descr=valuedescr) i888 = int_sub_ovf(i886, 1) guard_no_overflow() [] escape_n(i888) - i4360 = getfield_gc_pure_i(p15, descr=valuedescr) + i4360 = getfield_gc_i(p15, descr=valuedescr) i4362 = int_add_ovf(i4360, 1) guard_no_overflow() [] i4360p = int_sub_ovf(i4362, 1) @@ -4972,18 +4972,16 @@ def test_pure(self): ops = """ [p42] - p53 = getfield_gc_r(ConstPtr(myptr), descr=nextdescr) - p59 = getfield_gc_pure_r(p53, descr=valuedescr) + p53 = getfield_gc_r(ConstPtr(myptr3), descr=nextdescr3) + p59 = getfield_gc_r(p53, descr=valuedescr3) i61 = call_i(1, p59, descr=nonwritedescr) jump(p42) """ expected = """ - [p42, p59] - i61 = call_i(1, p59, descr=nonwritedescr) - jump(p42, p59) - - """ - self.node.value = 5 + [p42] + i61 = call_i(1, 7, descr=nonwritedescr) + jump(p42) + """ self.optimize_loop(ops, expected) def test_complains_getfieldpure_setfield(self): @@ -4992,7 +4990,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) setfield_gc(p1, p3, descr=nextdescr) jump(p3) """ @@ -5002,7 +5000,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5010,7 +5008,7 @@ expected = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5021,7 +5019,7 @@ ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) p3 = escape_r() setfield_gc(p3, p1, descr=nextdescr) jump() @@ -6167,14 +6165,14 @@ def test_bug_unroll_with_immutables(self): ops = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) p1 = new_with_vtable(descr=immut_descr) setfield_gc(p1, 1242, descr=immut_intval) jump(p1) """ preamble = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) jump() """ expected = """ @@ -7229,13 +7227,13 @@ [p0, p1, i0] quasiimmut_field(p0, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ expected = """ [p0, p1, i0] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ @@ -7246,7 +7244,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) escape_n(i1) jump() """ @@ -7298,11 +7296,11 @@ [i0a, i0b] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i2 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7325,11 +7323,11 @@ setfield_gc(p, 421, descr=quasifielddescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p, descr=quasifielddescr) + i1 = getfield_gc_i(p, descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(p, descr=quasifielddescr) + i2 = getfield_gc_i(p, descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7568,7 +7566,7 @@ def test_forced_virtual_pure_getfield(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=valuedescr) + p1 = getfield_gc_r(p0, descr=valuedescr3) jump(p1) """ self.optimize_loop(ops, ops) @@ -7578,7 +7576,7 @@ p1 = new_with_vtable(descr=nodesize3) setfield_gc(p1, p0, descr=valuedescr3) escape_n(p1) - p2 = getfield_gc_pure_r(p1, descr=valuedescr3) + p2 = getfield_gc_r(p1, descr=valuedescr3) escape_n(p2) jump(p0) """ @@ -7852,14 +7850,14 @@ def test_loopinvariant_getarrayitem_gc_pure(self): ops = """ [p9, i1] - i843 = getarrayitem_gc_pure_i(p9, i1, descr=arraydescr) + i843 = getarrayitem_gc_pure_i(p9, i1, descr=arrayimmutdescr) call_n(i843, descr=nonwritedescr) jump(p9, i1) """ expected = """ [p9, i1, i843] call_n(i843, descr=nonwritedescr) - ifoo = arraylen_gc(p9, descr=arraydescr) + ifoo = arraylen_gc(p9, descr=arrayimmutdescr) jump(p9, i1, i843) """ self.optimize_loop(ops, expected) @@ -7868,7 +7866,7 @@ ops = """ [p0] p1 = getfield_gc_r(p0, descr=nextdescr) - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) call_n(p2, descr=nonwritedescr) jump(p0) """ @@ -7883,14 +7881,14 @@ i1 = arraylen_gc(p1, descr=gcarraydescr) i2 = int_ge(i1, 8) guard_true(i2) [] - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) - jump(p2, p1) - """ - expected = """ - [p0, p2, p1] + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) + jump(p1, p2) + """ + expected = """ + [p0, p1, p2] call_n(p2, descr=nonwritedescr) i3 = arraylen_gc(p1, descr=gcarraydescr) # Should be killed by backend - jump(p0, p2, p1) + jump(p0, p1, p2) """ self.optimize_loop(ops, expected, expected_short=short) @@ -8065,7 +8063,7 @@ def test_dont_mixup_equal_boxes(self): ops = """ [p8] - i9 = getfield_gc_pure_i(p8, descr=valuedescr) + i9 = getfield_gc_i(p8, descr=valuedescr3) i10 = int_gt(i9, 0) guard_true(i10) [] i29 = int_lshift(i9, 1) @@ -8160,9 +8158,9 @@ py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") ops = """ [p5, p8] - i9 = getfield_gc_pure_i(p5, descr=valuedescr) + i9 = getfield_gc_i(p5, descr=valuedescr) call_n(i9, descr=nonwritedescr) - i11 = getfield_gc_pure_i(p8, descr=valuedescr) + i11 = getfield_gc_i(p8, descr=valuedescr) i13 = int_add_ovf(i11, 1) guard_no_overflow() [] p22 = new_with_vtable(descr=nodesize) @@ -8201,14 +8199,14 @@ ops = """ [p0] p10 = getfield_gc_r(ConstPtr(myptr), descr=otherdescr) From pypy.commits at gmail.com Wed Jan 27 13:15:13 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 10:15:13 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: fix Message-ID: <56a90931.03231c0a.f7411.37aa@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81980:4f7fc4ae3711 Date: 2016-01-27 19:14 +0100 http://bitbucket.org/pypy/pypy/changeset/4f7fc4ae3711/ Log: fix diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -291,7 +291,7 @@ @specialize.ll() def unwrapper(space, *args): from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj - from pypy.module.cpyext.pyobject import make_ref, from_ref + from pypy.module.cpyext.pyobject import from_ref, as_pyobj newargs = () keepalives = () assert len(args) == len(api_function.argtypes) @@ -301,7 +301,7 @@ # build a 'PyObject *' (not holding a reference) if not is_pyobj(input_arg): keepalives += (input_arg,) - arg = rffi.cast(ARG, as_xpyobj(space, input_arg)) + arg = rffi.cast(ARG, as_pyobj(space, input_arg)) else: arg = rffi.cast(ARG, input_arg) elif is_PyObject(ARG) and is_wrapped: From pypy.commits at gmail.com Wed Jan 27 13:23:06 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 10:23:06 -0800 (PST) Subject: [pypy-commit] pypy cpyext-gc-support-2: yay, test_api passes Message-ID: <56a90b0a.4e0e1c0a.b73cf.7b90@mx.google.com> Author: Armin Rigo Branch: cpyext-gc-support-2 Changeset: r81981:a3c663eba7c9 Date: 2016-01-27 19:21 +0100 http://bitbucket.org/pypy/pypy/changeset/a3c663eba7c9/ Log: yay, test_api passes diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -80,12 +80,11 @@ from pypy.module.cpyext.typeobject import setup_new_method_def from pypy.module.cpyext.api import INIT_FUNCTIONS - from pypy.module.cpyext.api import init_static_data_translated if we_are_translated(): rawrefcount.init(llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER, self.dealloc_trigger)) - init_static_data_translated(space) + XXX#init_static_data_translated(space) setup_new_method_def(space) diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -53,7 +53,6 @@ return state.clear_exception() def setup_method(self, func): - #return # ZZZ freeze_refcnts(self) def teardown_method(self, func): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -14,7 +14,7 @@ from rpython.tool.udir import udir from pypy.module.cpyext import api from pypy.module.cpyext.state import State -from pypy.module.cpyext.pyobject import RefcountState +from pypy.module.cpyext.pyobject import debug_collect from pypy.module.cpyext.pyobject import Py_DecRef, InvalidPointerException from rpython.tool.identity_dict import identity_dict from rpython.tool import leakfinder @@ -92,6 +92,7 @@ return str(pydname) def freeze_refcnts(self): + return #ZZZ state = self.space.fromcache(RefcountState) self.frozen_refcounts = {} for w_obj, obj in state.py_objects_w2r.iteritems(): @@ -109,6 +110,7 @@ @staticmethod def cleanup_references(space): + return #ZZZ state = space.fromcache(RefcountState) import gc; gc.collect() @@ -127,6 +129,8 @@ state.reset_borrowed_references() def check_and_print_leaks(self): + debug_collect() + return #ZZZ # check for sane refcnts import gc @@ -212,8 +216,8 @@ cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts - state = cls.space.fromcache(RefcountState) - state.non_heaptypes_w[:] = [] + #state = cls.space.fromcache(RefcountState) ZZZ + #state.non_heaptypes_w[:] = [] def setup_method(self, func): @unwrap_spec(name=str) @@ -348,7 +352,7 @@ interp2app(record_imported_module)) self.w_here = self.space.wrap( str(py.path.local(pypydir)) + '/module/cpyext/test/') - + self.w_debug_collect = self.space.wrap(interp2app(debug_collect)) # create the file lock before we count allocations self.space.call_method(self.space.sys.get("stdout"), "flush") @@ -647,7 +651,7 @@ Py_DECREF(true_obj); Py_DECREF(true_obj); fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); - return PyBool_FromLong(refcnt_after == refcnt+2 && refcnt < 3); + return PyBool_FromLong(refcnt_after == refcnt + 2); } static PyObject* foo_bar(PyObject* self, PyObject *args) { @@ -662,8 +666,10 @@ return NULL; refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); - fprintf(stderr, "REFCNT2 %i %i\\n", refcnt, refcnt_after); - return PyBool_FromLong(refcnt_after == refcnt); + fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after, + true_obj->ob_refcnt); + return PyBool_FromLong(refcnt_after == refcnt + 1 && + refcnt == true_obj->ob_refcnt); } static PyMethodDef methods[] = { diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -568,9 +568,6 @@ finish_type_2(space, py_type, w_obj) - state = space.fromcache(RefcountState) - state.non_heaptypes_w.append(w_obj) - return w_obj def finish_type_1(space, pto): From pypy.commits at gmail.com Wed Jan 27 13:25:02 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 27 Jan 2016 10:25:02 -0800 (PST) Subject: [pypy-commit] pypy default: Document branch 'exctrans' Message-ID: <56a90b7e.01cdc20a.16151.ffffce48@mx.google.com> Author: Ronan Lamy Branch: Changeset: r81982:a27b8ecd898e Date: 2016-01-27 18:23 +0000 http://bitbucket.org/pypy/pypy/changeset/a27b8ecd898e/ Log: Document branch 'exctrans' diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -133,4 +133,9 @@ `rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen for traces containing a large number of pure getfield operations. +.. branch: exctrans + +Try to ensure that no new functions get annotated during the 'source_c' phase. +Refactor sandboxing to operate at a higher level. + .. branch: cpyext-bootstrap From pypy.commits at gmail.com Wed Jan 27 14:16:27 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 27 Jan 2016 11:16:27 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: Mention the "@pypy-4.0.1" part of the url Message-ID: <56a9178b.cb571c0a.30ca2.ffff93f9@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r697:848d8a627c0e Date: 2016-01-27 20:16 +0100 http://bitbucket.org/pypy/pypy.org/changeset/848d8a627c0e/ Log: Mention the "@pypy-4.0.1" part of the url diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -214,7 +214,10 @@

      If you have pip:

       pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
      +pypy -m pip install git+https://bitbucket.org/pypy/numpy.git@pypy-4.0.1
       
      +

      (the second version selects a particular tag, which may be needed if your +pypy is not the latest development version.)

      Alternatively, the direct way:

       git clone https://bitbucket.org/pypy/numpy.git
      diff --git a/source/download.txt b/source/download.txt
      --- a/source/download.txt
      +++ b/source/download.txt
      @@ -225,6 +225,10 @@
       If you have pip::
       
           pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
      +    pypy -m pip install git+https://bitbucket.org/pypy/numpy.git at pypy-4.0.1
      +
      +(the second version selects a particular tag, which may be needed if your
      +pypy is not the latest development version.)
       
       Alternatively, the direct way::
       
      
      From pypy.commits at gmail.com  Wed Jan 27 14:48:07 2016
      From: pypy.commits at gmail.com (plan_rich)
      Date: Wed, 27 Jan 2016 11:48:07 -0800 (PST)
      Subject: [pypy-commit] pypy s390x-backend: added ztranslation call assembler
       test and some more (hurray,
       one of them fails and this might be the bug I'm searching for)
      Message-ID: <56a91ef7.8673c20a.8824a.ffffec35@mx.google.com>
      
      Author: Richard Plangger 
      Branch: s390x-backend
      Changeset: r81983:228ce776f7f6
      Date: 2016-01-27 20:46 +0100
      http://bitbucket.org/pypy/pypy/changeset/228ce776f7f6/
      
      Log:	added ztranslation call assembler test and some more (hurray, one of
      	them fails and this might be the bug I'm searching for)
      
      diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
      --- a/rpython/jit/backend/zarch/assembler.py
      +++ b/rpython/jit/backend/zarch/assembler.py
      @@ -278,7 +278,7 @@
               if exctploc is not None:
                   mc.LG(exctploc, l.addr(diff, r.SCRATCH))
               # Zero out the exception fields
      -        mc.LGHI(r.SCRATCH2, l.imm(0))
      +        mc.XGR(r.SCRATCH2, r.SCRATCH2)
               mc.STG(r.SCRATCH2, l.addr(0, r.SCRATCH))
               mc.STG(r.SCRATCH2, l.addr(diff, r.SCRATCH))
       
      @@ -1169,10 +1169,8 @@
                   # to be executed, thus remove the first opcode
                   self.mc.b_offset(descr._ll_loop_code + self.mc.LARL_byte_count)
               else:
      -            # restore the pool address
                   offset = self.pool.get_descr_offset(descr) + \
                            JUMPABS_TARGET_ADDR__POOL_OFFSET
      -            offset_pool = offset + JUMPABS_POOL_ADDR_POOL_OFFSET
                   self.mc.LG(r.SCRATCH, l.pool(offset))
                   self.mc.BCR(c.ANY, r.SCRATCH)
       
      diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
      --- a/rpython/jit/backend/zarch/opassembler.py
      +++ b/rpython/jit/backend/zarch/opassembler.py
      @@ -836,6 +836,22 @@
           def emit_restore_exception(self, op, arglocs, regalloc):
               self._restore_exception(self.mc, arglocs[1], arglocs[0])
       
      +    def emit_guard_no_exception(self, op, arglocs, regalloc):
      +        self.mc.load_imm(r.SCRATCH, self.cpu.pos_exception())
      +        self.mc.LG(r.SCRATCH2, l.addr(0,r.SCRATCH))
      +        self.mc.cmp_op(r.SCRATCH2, l.imm(0), imm=True)
      +        self.guard_success_cc = c.EQ
      +        self._emit_guard(op, arglocs)
      +        # If the previous operation was a COND_CALL, overwrite its conditional
      +        # jump to jump over this GUARD_NO_EXCEPTION as well, if we can
      +        if self._find_nearby_operation(regalloc,-1).getopnum() == rop.COND_CALL:
      +            jmp_adr, fcond = self.previous_cond_call_jcond
      +            relative_target = self.mc.currpos() - jmp_adr
      +            pmc = OverwritingBuilder(self.mc, jmp_adr, 1)
      +            pmc.BRCL(fcond, l.imm(relative_target))
      +            pmc.overwrite()
      +
      +
       class MemoryOpAssembler(object):
           _mixin_ = True
       
      @@ -1194,21 +1210,6 @@
           def emit_leave_portal_frame(self, op, arglocs, regalloc):
               self.leave_portal_frame(op)
       
      -    def emit_guard_no_exception(self, op, arglocs, regalloc):
      -        self.mc.load_imm(r.SCRATCH, self.cpu.pos_exception())
      -        self.mc.LG(r.SCRATCH2, l.addr(0,r.SCRATCH))
      -        self.mc.cmp_op(r.SCRATCH2, l.imm(0), imm=True)
      -        self.guard_success_cc = c.EQ
      -        self._emit_guard(op, arglocs)
      -        # If the previous operation was a COND_CALL, overwrite its conditional
      -        # jump to jump over this GUARD_NO_EXCEPTION as well, if we can
      -        if self._find_nearby_operation(regalloc,-1).getopnum() == rop.COND_CALL:
      -            jmp_adr, fcond = self.previous_cond_call_jcond
      -            relative_target = self.mc.currpos() - jmp_adr
      -            pmc = OverwritingBuilder(self.mc, jmp_adr, 1)
      -            pmc.BRCL(fcond, l.imm(relative_target))
      -            pmc.overwrite()
      -
       class OpAssembler(IntOpAssembler, FloatOpAssembler,
                         GuardOpAssembler, CallOpAssembler,
                         AllocOpAssembler, MemoryOpAssembler,
      diff --git a/rpython/jit/backend/zarch/test/test_tl.py b/rpython/jit/backend/zarch/test/test_tl.py
      new file mode 100644
      --- /dev/null
      +++ b/rpython/jit/backend/zarch/test/test_tl.py
      @@ -0,0 +1,9 @@
      +import py
      +from rpython.jit.metainterp.test.test_tl import ToyLanguageTests
      +from rpython.jit.backend.zarch.test.support import JitZARCHMixin
      +
      +class TestTL(JitZARCHMixin, ToyLanguageTests):
      +    # for the individual tests see
      +    # ====> ../../../metainterp/test/test_tl.py
      +    pass
      +
      diff --git a/rpython/jit/backend/zarch/test/test_ztranslation_call_assembler.py b/rpython/jit/backend/zarch/test/test_ztranslation_call_assembler.py
      new file mode 100644
      --- /dev/null
      +++ b/rpython/jit/backend/zarch/test/test_ztranslation_call_assembler.py
      @@ -0,0 +1,10 @@
      +from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestCallAssembler
      +from rpython.translator.translator import TranslationContext
      +from rpython.config.translationoption import DEFL_GC
      +from rpython.jit.backend.zarch.arch import WORD
      +import sys
      +
      +class TestTranslationCallAssemblerZARCH(TranslationTestCallAssembler):
      +    def _check_cbuilder(self, cbuilder):
      +        pass
      +
      diff --git a/rpython/jit/backend/zarch/test/test_ztranslation_external_exception.py b/rpython/jit/backend/zarch/test/test_ztranslation_external_exception.py
      new file mode 100644
      --- /dev/null
      +++ b/rpython/jit/backend/zarch/test/test_ztranslation_external_exception.py
      @@ -0,0 +1,19 @@
      +from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest
      +from rpython.translator.translator import TranslationContext
      +from rpython.config.translationoption import DEFL_GC
      +from rpython.translator.platform import platform as compiler
      +
      +if compiler.name == 'msvc':
      +    _MSVC = True
      +else:
      +    _MSVC = False
      +
      +class TestTranslationRemoveTypePtrX86(TranslationRemoveTypePtrTest):
      +    def _get_TranslationContext(self):
      +        t = TranslationContext()
      +        t.config.translation.gc = DEFL_GC   # 'hybrid' or 'minimark'
      +        if not _MSVC:
      +            t.config.translation.gcrootfinder = 'asmgcc'
      +        t.config.translation.list_comprehension_operations = True
      +        t.config.translation.gcremovetypeptr = True
      +        return t
      
      From pypy.commits at gmail.com  Wed Jan 27 14:49:10 2016
      From: pypy.commits at gmail.com (arigo)
      Date: Wed, 27 Jan 2016 11:49:10 -0800 (PST)
      Subject: [pypy-commit] pypy cpyext-gc-support-2: Add a comment (thanks ronan)
      Message-ID: <56a91f36.05bd1c0a.11b43.5dd9@mx.google.com>
      
      Author: Armin Rigo 
      Branch: cpyext-gc-support-2
      Changeset: r81984:97395b8025f0
      Date: 2016-01-27 20:48 +0100
      http://bitbucket.org/pypy/pypy/changeset/97395b8025f0/
      
      Log:	Add a comment (thanks ronan)
      
      diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py
      --- a/rpython/rlib/rawrefcount.py
      +++ b/rpython/rlib/rawrefcount.py
      @@ -1,6 +1,9 @@
       #
       #  See documentation in pypy/doc/discussion/rawrefcount.rst
       #
      +#  This is meant for pypy's cpyext module, but is a generally
      +#  useful interface over our GC.  XXX "pypy" should be removed here
      +#
       import sys, weakref
       from rpython.rtyper.lltypesystem import lltype, llmemory
       from rpython.rlib.objectmodel import we_are_translated, specialize
      
      From pypy.commits at gmail.com  Wed Jan 27 17:05:53 2016
      From: pypy.commits at gmail.com (mattip)
      Date: Wed, 27 Jan 2016 14:05:53 -0800 (PST)
      Subject: [pypy-commit] pypy default: remove all traces of micronumpy from
       cpyext if --withoutmod-micronumpy option used
      Message-ID: <56a93f41.44e21c0a.c9c17.ffff8792@mx.google.com>
      
      Author: mattip 
      Branch: 
      Changeset: r81985:3ec0e1e24a2e
      Date: 2016-01-28 00:04 +0200
      http://bitbucket.org/pypy/pypy/changeset/3ec0e1e24a2e/
      
      Log:	remove all traces of micronumpy from cpyext if --withoutmod-
      	micronumpy option used
      
      	also back out ec6457a2d845
      
      diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
      --- a/pypy/config/pypyoption.py
      +++ b/pypy/config/pypyoption.py
      @@ -85,8 +85,7 @@
       module_dependencies = {
           '_multiprocessing': [('objspace.usemodules.time', True),
                                ('objspace.usemodules.thread', True)],
      -    'cpyext': [('objspace.usemodules.array', True),
      -               ('objspace.usemodules.micronumpy', True)],
      +    'cpyext': [('objspace.usemodules.array', True)],
           'cppyy': [('objspace.usemodules.cpyext', True)],
           }
       module_suggests = {
      diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py
      --- a/pypy/module/cpyext/__init__.py
      +++ b/pypy/module/cpyext/__init__.py
      @@ -36,7 +36,6 @@
       import pypy.module.cpyext.object
       import pypy.module.cpyext.stringobject
       import pypy.module.cpyext.tupleobject
      -import pypy.module.cpyext.ndarrayobject
       import pypy.module.cpyext.setobject
       import pypy.module.cpyext.dictobject
       import pypy.module.cpyext.intobject
      diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
      --- a/pypy/module/cpyext/api.py
      +++ b/pypy/module/cpyext/api.py
      @@ -143,7 +143,7 @@
               target.chmod(0444) # make the file read-only, to make sure that nobody
                                  # edits it by mistake
       
      -def copy_header_files(dstdir):
      +def copy_header_files(dstdir, copy_numpy_headers):
           # XXX: 20 lines of code to recursively copy a directory, really??
           assert dstdir.check(dir=True)
           headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl')
      @@ -151,15 +151,16 @@
               headers.append(udir.join(name))
           _copy_header_files(headers, dstdir)
       
      -    try:
      -        dstdir.mkdir('numpy')
      -    except py.error.EEXIST:
      -        pass
      -    numpy_dstdir = dstdir / 'numpy'
      +    if copy_numpy_headers:
      +        try:
      +            dstdir.mkdir('numpy')
      +        except py.error.EEXIST:
      +            pass
      +        numpy_dstdir = dstdir / 'numpy'
       
      -    numpy_include_dir = include_dir / 'numpy'
      -    numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl')
      -    _copy_header_files(numpy_headers, numpy_dstdir)
      +        numpy_include_dir = include_dir / 'numpy'
      +        numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl')
      +        _copy_header_files(numpy_headers, numpy_dstdir)
       
       
       class NotSpecified(object):
      @@ -482,7 +483,6 @@
               "PyComplex_Type": "space.w_complex",
               "PyByteArray_Type": "space.w_bytearray",
               "PyMemoryView_Type": "space.w_memoryview",
      -        "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)",
               "PyBaseObject_Type": "space.w_object",
               'PyNone_Type': 'space.type(space.w_None)',
               'PyNotImplemented_Type': 'space.type(space.w_NotImplemented)',
      @@ -773,6 +773,8 @@
           "NOT_RPYTHON"
           from pypy.module.cpyext.pyobject import make_ref
       
      +    use_micronumpy = setup_micronumpy(space)
      +
           export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS)
           from rpython.translator.c.database import LowLevelDatabase
           db = LowLevelDatabase()
      @@ -1009,6 +1011,24 @@
           pypy_decl_h.write('\n'.join(pypy_decls))
           return functions
       
      +separate_module_files = [source_dir / "varargwrapper.c",
      +                         source_dir / "pyerrors.c",
      +                         source_dir / "modsupport.c",
      +                         source_dir / "getargs.c",
      +                         source_dir / "abstract.c",
      +                         source_dir / "stringobject.c",
      +                         source_dir / "mysnprintf.c",
      +                         source_dir / "pythonrun.c",
      +                         source_dir / "sysmodule.c",
      +                         source_dir / "bufferobject.c",
      +                         source_dir / "cobject.c",
      +                         source_dir / "structseq.c",
      +                         source_dir / "capsule.c",
      +                         source_dir / "pysignals.c",
      +                         source_dir / "pythread.c",
      +                         source_dir / "missing.c",
      +                         ]
      +
       def build_eci(building_bridge, export_symbols, code):
           "NOT_RPYTHON"
           # Build code and get pointer to the structure
      @@ -1062,24 +1082,7 @@
       
           eci = ExternalCompilationInfo(
               include_dirs=include_dirs,
      -        separate_module_files=[source_dir / "varargwrapper.c",
      -                               source_dir / "pyerrors.c",
      -                               source_dir / "modsupport.c",
      -                               source_dir / "getargs.c",
      -                               source_dir / "abstract.c",
      -                               source_dir / "stringobject.c",
      -                               source_dir / "mysnprintf.c",
      -                               source_dir / "pythonrun.c",
      -                               source_dir / "sysmodule.c",
      -                               source_dir / "bufferobject.c",
      -                               source_dir / "cobject.c",
      -                               source_dir / "structseq.c",
      -                               source_dir / "capsule.c",
      -                               source_dir / "pysignals.c",
      -                               source_dir / "pythread.c",
      -                               source_dir / "ndarrayobject.c",
      -                               source_dir / "missing.c",
      -                               ],
      +        separate_module_files= separate_module_files,
               separate_module_sources=separate_module_sources,
               compile_extra=compile_extra,
               **kwds
      @@ -1087,10 +1090,22 @@
       
           return eci
       
      +def setup_micronumpy(space):
      +    use_micronumpy = space.config.objspace.usemodules.micronumpy
      +    if not use_micronumpy:
      +        return use_micronumpy
      +    # import to register api functions by side-effect
      +    import pypy.module.cpyext.ndarrayobject 
      +    global GLOBALS, SYMBOLS_C, separate_module_files
      +    GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)")
      +    SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']
      +    separate_module_files.append(source_dir / "ndarrayobject.c")
      +    return use_micronumpy
       
       def setup_library(space):
           "NOT_RPYTHON"
           from pypy.module.cpyext.pyobject import make_ref
      +    use_micronumpy = setup_micronumpy(space)
       
           export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS)
           from rpython.translator.c.database import LowLevelDatabase
      @@ -1151,7 +1166,7 @@
       
           setup_init_functions(eci, translating=True)
           trunk_include = pypydir.dirpath() / 'include'
      -    copy_header_files(trunk_include)
      +    copy_header_files(trunk_include, use_micronumpy)
       
       def _load_from_cffi(space, name, path, initptr):
           from pypy.module._cffi_backend import cffi1_module
      diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
      --- a/pypy/module/cpyext/test/test_api.py
      +++ b/pypy/module/cpyext/test/test_api.py
      @@ -98,7 +98,7 @@
       
       
       def test_copy_header_files(tmpdir):
      -    api.copy_header_files(tmpdir)
      +    api.copy_header_files(tmpdir, True)
           def check(name):
               f = tmpdir.join(name)
               assert f.check(file=True)
      
      From pypy.commits at gmail.com  Wed Jan 27 18:25:07 2016
      From: pypy.commits at gmail.com (mjacob)
      Date: Wed, 27 Jan 2016 15:25:07 -0800 (PST)
      Subject: [pypy-commit] pypy py3.3: hg merge 03591a1499c8
      Message-ID: <56a951d3.01941c0a.f20b2.0f1b@mx.google.com>
      
      Author: Manuel Jacob 
      Branch: py3.3
      Changeset: r81986:edefd168b2b3
      Date: 2016-01-27 19:52 +0100
      http://bitbucket.org/pypy/pypy/changeset/edefd168b2b3/
      
      Log:	hg merge 03591a1499c8
      
      	This merges marky1991's changes before he merged the problematic
      	merge 88aafcb7c318.
      
      diff --git a/lib-python/3/code.py b/lib-python/3/code.py
      --- a/lib-python/3/code.py
      +++ b/lib-python/3/code.py
      @@ -105,10 +105,9 @@
               The output is written by self.write(), below.
       
               """
      -        type, value, tb = sys.exc_info()
      +        type, value, sys.last_traceback = sys.exc_info()
               sys.last_type = type
               sys.last_value = value
      -        sys.last_traceback = tb
               if filename and type is SyntaxError:
                   # Work hard to stuff the correct filename in the exception
                   try:
      @@ -126,7 +125,7 @@
               else:
                   # If someone has set sys.excepthook, we let that take precedence
                   # over self.write
      -            sys.excepthook(type, value, tb)
      +            sys.excepthook(type, value, self.last_traceback)
       
           def showtraceback(self):
               """Display the exception that just occurred.
      @@ -136,25 +135,35 @@
               The output is written by self.write(), below.
       
               """
      +        sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
      +        sys.last_traceback = last_tb
               try:
      -            type, value, tb = sys.exc_info()
      -            sys.last_type = type
      -            sys.last_value = value
      -            sys.last_traceback = tb
      -            tblist = traceback.extract_tb(tb)
      -            del tblist[:1]
      -            lines = traceback.format_list(tblist)
      -            if lines:
      -                lines.insert(0, "Traceback (most recent call last):\n")
      -            lines.extend(traceback.format_exception_only(type, value))
      +            lines = []
      +            for value, tb in traceback._iter_chain(*ei[1:]):
      +                if isinstance(value, str):
      +                    lines.append(value)
      +                    lines.append('\n')
      +                    continue
      +                if tb:
      +                    tblist = traceback.extract_tb(tb)
      +                    if tb is last_tb:
      +                        # The last traceback includes the frame we
      +                        # exec'd in
      +                        del tblist[:1]
      +                    tblines = traceback.format_list(tblist)
      +                    if tblines:
      +                        lines.append("Traceback (most recent call last):\n")
      +                        lines.extend(tblines)
      +                lines.extend(traceback.format_exception_only(type(value),
      +                                                             value))
               finally:
      -            tblist = tb = None
      +            tblist = last_tb = ei = None
               if sys.excepthook is sys.__excepthook__:
                   self.write(''.join(lines))
               else:
                   # If someone has set sys.excepthook, we let that take precedence
                   # over self.write
      -            sys.excepthook(type, value, tb)
      +            sys.excepthook(sys.last_type, sys.last_value, last_tb)
       
           def write(self, data):
               """Write a string.
      diff --git a/lib-python/3/pickle.py b/lib-python/3/pickle.py
      --- a/lib-python/3/pickle.py
      +++ b/lib-python/3/pickle.py
      @@ -23,7 +23,7 @@
       
       """
       
      -from types import FunctionType, BuiltinFunctionType
      +from types import FunctionType, BuiltinFunctionType, ModuleType
       from copyreg import dispatch_table
       from copyreg import _extension_registry, _inverted_registry, _extension_cache
       import marshal
      @@ -622,6 +622,9 @@
                   # else tmp is empty, and we're done
       
           def save_dict(self, obj):
      +        modict_saver = self._pickle_maybe_moduledict(obj)
      +        if modict_saver is not None:
      +            return self.save_reduce(*modict_saver)
               write = self.write
       
               if self.bin:
      @@ -672,6 +675,44 @@
                       write(SETITEM)
                   # else tmp is empty, and we're done
       
      +    def _pickle_maybe_moduledict(self, obj):
      +        # save module dictionary as "getattr(module, '__dict__')"
      +        try:
      +            name = obj['__name__']
      +            if type(name) is not str:
      +                return None
      +            themodule = sys.modules[name]
      +            if type(themodule) is not ModuleType:
      +                return None
      +            if themodule.__dict__ is not obj:
      +                return None
      +        except (AttributeError, KeyError, TypeError):
      +            return None
      +        return getattr, (themodule, '__dict__')
      +
      +    def save_function(self, obj):
      +        try:
      +            return self.save_global(obj)
      +        except PicklingError:
      +            pass
      +        # Check copy_reg.dispatch_table
      +        reduce = dispatch_table.get(type(obj))
      +        if reduce:
      +            rv = reduce(obj)
      +        else:
      +            # Check for a __reduce_ex__ method, fall back to __reduce__
      +            reduce = getattr(obj, "__reduce_ex__", None)
      +            if reduce:
      +                rv = reduce(self.proto)
      +            else:
      +                reduce = getattr(obj, "__reduce__", None)
      +                if reduce:
      +                    rv = reduce()
      +                else:
      +                    raise e
      +        return self.save_reduce(obj=obj, *rv)
      +    dispatch[FunctionType] = save_function
      +
           def save_global(self, obj, name=None, pack=struct.pack):
               write = self.write
               memo = self.memo
      @@ -737,7 +778,7 @@
                   return self.save_reduce(type, (...,), obj=obj)
               return self.save_global(obj)
       
      -    dispatch[FunctionType] = save_global
      +    dispatch[FunctionType] = save_function
           dispatch[BuiltinFunctionType] = save_global
           dispatch[type] = save_type
       
      diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
      --- a/pypy/interpreter/astcompiler/codegen.py
      +++ b/pypy/interpreter/astcompiler/codegen.py
      @@ -1381,7 +1381,7 @@
               # ... and store it as __module__
               self.name_op("__module__", ast.Store)
               # store the qualname
      -        w_qualname = self.space.wrap(self.qualname)
      +        w_qualname = self.space.wrap(self.qualname.decode("utf-8"))
               self.load_const(w_qualname)
               self.name_op("__qualname__", ast.Store)
               # compile the body proper
      diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
      --- a/pypy/interpreter/function.py
      +++ b/pypy/interpreter/function.py
      @@ -306,6 +306,7 @@
               tup_base = []
               tup_state = [
                   w(self.name),
      +            w(self.qualname),
                   w_doc,
                   w(self.code),
                   w_func_globals,
      @@ -319,8 +320,8 @@
           def descr_function__setstate__(self, space, w_args):
               args_w = space.unpackiterable(w_args)
               try:
      -            (w_name, w_doc, w_code, w_func_globals, w_closure, w_defs,
      -             w_func_dict, w_module) = args_w
      +            (w_name, w_qualname, w_doc, w_code, w_func_globals, w_closure,
      +             w_defs, w_func_dict, w_module) = args_w
               except ValueError:
                   # wrong args
                   raise OperationError(space.w_ValueError,
      @@ -328,6 +329,7 @@
       
               self.space = space
               self.name = space.str_w(w_name)
      +        self.qualname = space.str_w(w_qualname).decode("utf-8")
               self.code = space.interp_w(Code, w_code)
               if not space.is_w(w_closure, space.w_None):
                   from pypy.interpreter.nestedscope import Cell
      diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
      --- a/pypy/interpreter/test/test_app_main.py
      +++ b/pypy/interpreter/test/test_app_main.py
      @@ -808,31 +808,6 @@
               data = self.run('-S -i', expect_prompt=True, expect_banner=True)
               assert 'copyright' not in data
       
      -    def test_non_interactive_stdout_fully_buffered(self):
      -        if os.name == 'nt':
      -            try:
      -                import __pypy__
      -            except:
      -                py.test.skip('app_main cannot run on non-pypy for windows')
      -        path = getscript(r"""
      -            import sys, time
      -            sys.stdout.write('\x00(STDOUT)\n\x00')   # stays in buffers
      -            time.sleep(1)
      -            sys.stderr.write('\x00[STDERR]\n\x00')
      -            time.sleep(1)
      -            # stdout flushed automatically here
      -            """)
      -        cmdline = '%s -u "%s" %s' % (python3, app_main, path)
      -        print 'POPEN:', cmdline
      -        child_in, child_out_err = os.popen4(cmdline)
      -        data = child_out_err.read(11)
      -        # Py3 is always at least line buffered
      -        assert data == '\x00(STDOUT)\n\x00'    # from stdout
      -        child_in.close()
      -        data = child_out_err.read(11)
      -        assert data == '\x00[STDERR]\n\x00'    # from stderr
      -        child_out_err.close()
      -
           def test_non_interactive_stdout_unbuffered(self, monkeypatch):
               monkeypatch.setenv('PYTHONUNBUFFERED', '1')
               if os.name == 'nt':
      diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py
      --- a/pypy/interpreter/test/test_zzpickle_and_slow.py
      +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py
      @@ -394,8 +394,10 @@
               import pickle
               tdict = {'2':2, '3':3, '5':5}
               diter  = iter(tdict)
      -        next(diter)
      -        raises(TypeError, pickle.dumps, diter)
      +        seen = next(diter)
      +        pckl = pickle.dumps(diter)
      +        result = pickle.loads(pckl)
      +        assert set(result) == (set('235') - set(seen))
       
           def test_pickle_reversed(self):
               import pickle
      diff --git a/pypy/module/__pypy__/test/test_stderrprinter.py b/pypy/module/__pypy__/test/test_stderrprinter.py
      --- a/pypy/module/__pypy__/test/test_stderrprinter.py
      +++ b/pypy/module/__pypy__/test/test_stderrprinter.py
      @@ -7,7 +7,9 @@
           p.close()  # this should be a no-op
           p.flush()  # this should be a no-op
           assert p.fileno() == 2
      -    assert p.isatty()
      +    # It doesn't make sense to assert this.  Stderror could be a tty
      +    # (the terminal) or not, depending on how we are running the tests.
      +    # assert p.isatty()
           assert p.write('foo') == 3
           raises(TypeError, p.write, b'foo')
       
      diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
      --- a/pypy/module/_cffi_backend/test/_backend_test_c.py
      +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
      @@ -1,6 +1,9 @@
       # ____________________________________________________________
       
       import sys
      +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version"
      +                                " of cffi that differs from the one that we"
      +                                " get from 'import _cffi_backend'")
       if sys.version_info < (3,):
           type_or_class = "type"
           mandatory_b_prefix = ''
      @@ -2313,9 +2316,6 @@
           f(); f()
           assert get_errno() == 77
       
      -def test_abi():
      -    assert isinstance(FFI_DEFAULT_ABI, int)
      -
       def test_cast_to_array():
           # not valid in C!  extension to get a non-owning 
           BInt = new_primitive_type("int")
      @@ -3393,6 +3393,78 @@
           check(4 | 8,  "CHB", "GTB")
           check(4 | 16, "CHB", "ROB")
       
      +def test_memmove():
      +    Short = new_primitive_type("short")
      +    ShortA = new_array_type(new_pointer_type(Short), None)
      +    Char = new_primitive_type("char")
      +    CharA = new_array_type(new_pointer_type(Char), None)
      +    p = newp(ShortA, [-1234, -2345, -3456, -4567, -5678])
      +    memmove(p, p + 1, 4)
      +    assert list(p) == [-2345, -3456, -3456, -4567, -5678]
      +    p[2] = 999
      +    memmove(p + 2, p, 6)
      +    assert list(p) == [-2345, -3456, -2345, -3456, 999]
      +    memmove(p + 4, newp(CharA, b"\x71\x72"), 2)
      +    if sys.byteorder == 'little':
      +        assert list(p) == [-2345, -3456, -2345, -3456, 0x7271]
      +    else:
      +        assert list(p) == [-2345, -3456, -2345, -3456, 0x7172]
      +
      +def test_memmove_buffer():
      +    import array
      +    Short = new_primitive_type("short")
      +    ShortA = new_array_type(new_pointer_type(Short), None)
      +    a = array.array('H', [10000, 20000, 30000])
      +    p = newp(ShortA, 5)
      +    memmove(p, a, 6)
      +    assert list(p) == [10000, 20000, 30000, 0, 0]
      +    memmove(p + 1, a, 6)
      +    assert list(p) == [10000, 10000, 20000, 30000, 0]
      +    b = array.array('h', [-1000, -2000, -3000])
      +    memmove(b, a, 4)
      +    assert b.tolist() == [10000, 20000, -3000]
      +    assert a.tolist() == [10000, 20000, 30000]
      +    p[0] = 999
      +    p[1] = 998
      +    p[2] = 997
      +    p[3] = 996
      +    p[4] = 995
      +    memmove(b, p, 2)
      +    assert b.tolist() == [999, 20000, -3000]
      +    memmove(b, p + 2, 4)
      +    assert b.tolist() == [997, 996, -3000]
      +    p[2] = -p[2]
      +    p[3] = -p[3]
      +    memmove(b, p + 2, 6)
      +    assert b.tolist() == [-997, -996, 995]
      +
      +def test_memmove_readonly_readwrite():
      +    SignedChar = new_primitive_type("signed char")
      +    SignedCharA = new_array_type(new_pointer_type(SignedChar), None)
      +    p = newp(SignedCharA, 5)
      +    memmove(p, b"abcde", 3)
      +    assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0]
      +    memmove(p, bytearray(b"ABCDE"), 2)
      +    assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0]
      +    py.test.raises((TypeError, BufferError), memmove, b"abcde", p, 3)
      +    ba = bytearray(b"xxxxx")
      +    memmove(dest=ba, src=p, n=3)
      +    assert ba == bytearray(b"ABcxx")
      +    memmove(ba, b"EFGH", 4)
      +    assert ba == bytearray(b"EFGHx")
      +
      +def test_memmove_sign_check():
      +    SignedChar = new_primitive_type("signed char")
      +    SignedCharA = new_array_type(new_pointer_type(SignedChar), None)
      +    p = newp(SignedCharA, 5)
      +    py.test.raises(ValueError, memmove, p, p + 1, -1)   # not segfault
      +
      +def test_memmove_bad_cdata():
      +    BInt = new_primitive_type("int")
      +    p = cast(BInt, 42)
      +    py.test.raises(TypeError, memmove, p, bytearray(b'a'), 1)
      +    py.test.raises(TypeError, memmove, bytearray(b'a'), p, 1)
      +
       def test_dereference_null_ptr():
           BInt = new_primitive_type("int")
           BIntPtr = new_pointer_type(BInt)
      @@ -3425,6 +3497,20 @@
                                   "that you are not e.g. mixing up different ffi "
                                   "instances)")
       
      -def test_version():
      -    # this test is here mostly for PyPy
      -    assert __version__ == "1.2.1"
      +def test_stdcall_function_type():
      +    assert FFI_CDECL == FFI_DEFAULT_ABI
      +    try:
      +        stdcall = FFI_STDCALL
      +    except NameError:
      +        stdcall = FFI_DEFAULT_ABI
      +    BInt = new_primitive_type("int")
      +    BFunc = new_function_type((BInt, BInt), BInt, False, stdcall)
      +    if stdcall != FFI_DEFAULT_ABI:
      +        assert repr(BFunc) == ""
      +    else:
      +        assert repr(BFunc) == ""
      +
      +def test_get_common_types():
      +    d = {}
      +    _get_common_types(d)
      +    assert d['bool'] == '_Bool'
      diff --git a/pypy/module/_frozen_importlib/__init__.py b/pypy/module/_frozen_importlib/__init__.py
      --- a/pypy/module/_frozen_importlib/__init__.py
      +++ b/pypy/module/_frozen_importlib/__init__.py
      @@ -30,7 +30,7 @@
                             space.wrap(space.builtin))
               code_w.exec_code(space, self.w_dict, self.w_dict)
       
      -        self.w_import = space.wrap(interp_import.import_with_frames_removed)
      +        self.w_import = space.wrap(interp_import.__import__)
       
           def startup(self, space):
               """Copy our __import__ to builtins."""
      diff --git a/pypy/module/_frozen_importlib/interp_import.py b/pypy/module/_frozen_importlib/interp_import.py
      --- a/pypy/module/_frozen_importlib/interp_import.py
      +++ b/pypy/module/_frozen_importlib/interp_import.py
      @@ -2,7 +2,7 @@
       from pypy.interpreter.error import OperationError
       
       @interp2app
      -def import_with_frames_removed(space, __args__):
      +def __import__(space, __args__):
           try:
               return space.call_args(
                   space.getbuiltinmodule('_frozen_importlib').getdictvalue(
      diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
      --- a/pypy/module/_io/test/test_io.py
      +++ b/pypy/module/_io/test/test_io.py
      @@ -390,7 +390,7 @@
                           raises(TypeError, pickle.dumps, f, protocol)
       
           def test_mod(self):
      -        import _io
      +        import _io, _frozen_importlib
               typemods = dict((t, t.__module__) for t in vars(_io).values()
                               if isinstance(t, type))
               for t, mod in typemods.items():
      @@ -398,6 +398,11 @@
                       assert mod == 'builtins'
                   elif t is _io.UnsupportedOperation:
                       assert mod == 'io'
      +            #TODO: Make sure this is a reasonable thing to do. Check if there is
      +            #a cleaner way to do these checks or if these checks even make sense
      +            #in general. They seem really brittle.
      +            elif t is _frozen_importlib.BuiltinImporter:
      +                assert mod == "_frozen_importlib"
                   else:
                       assert mod == '_io'
       
      diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
      --- a/pypy/module/_ssl/interp_ssl.py
      +++ b/pypy/module/_ssl/interp_ssl.py
      @@ -1094,27 +1094,13 @@
           return space.str(space.getattr(w_exc, space.wrap("args")))
       
       
      -class W_Error(interp_exceptions.W_OSError):
      -    "An error occurred in the SSL implementation."
      -
      -    def descr_str(self, space):
      -        if space.isinstance_w(self.w_strerror, space.w_unicode):
      -            return self.w_strerror
      -        else:
      -            return space.str(space.newtuple(self.args_w))
      -
      -W_Error.typedef = TypeDef(
      -    "ssl.SSLError",
      -    interp_exceptions.W_OSError.typedef,
      -    __new__  = interp_exceptions._new(W_Error),
      -    __doc__  = W_Error.__doc__,
      -    __str__  = interp2app(W_Error.descr_str),
      -    )
      -
      -
       class ErrorCache:
           def __init__(self, space):
      -        self.w_error = space.gettypefor(W_Error)
      +        w_socketerror = interp_socket.get_error(space, "error")
      +        self.w_error = space.new_exception_class(
      +            "_ssl.SSLError", w_socketerror)
      +        space.setattr(self.w_error, space.wrap('__str__'),
      +                      space.wrap(interp2app(SSLError_descr_str)))
               self.w_ZeroReturnError = space.new_exception_class(
                   "ssl.SSLZeroReturnError", self.w_error)
               self.w_WantReadError = space.new_exception_class(
      diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
      --- a/pypy/module/imp/importing.py
      +++ b/pypy/module/imp/importing.py
      @@ -75,19 +75,25 @@
           w_mod = check_sys_modules_w(space, modulename)
           if w_mod:
               return w_mod
      -    if modulename in space.builtin_modules:
      -        return space.getbuiltinmodule(modulename)
      +    try:
      +        lock = getimportlock(space)
      +        lock.acquire_lock()
       
      -    ec = space.getexecutioncontext()
      -    with open(os.path.join(lib_pypy, modulename + '.py')) as fp:
      -        source = fp.read()
      -    pathname = "" % modulename
      -    code_w = ec.compiler.compile(source, pathname, 'exec', 0)
      -    w_mod = add_module(space, space.wrap(modulename))
      -    space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod)
      -    space.setitem(w_mod.w_dict, space.wrap('__name__'), w_mod.w_name)
      -    code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict)
      -    assert check_sys_modules_w(space, modulename)
      +        if modulename in space.builtin_modules:
      +            return space.getbuiltinmodule(modulename)
      +
      +        ec = space.getexecutioncontext()
      +        with open(os.path.join(lib_pypy, modulename + '.py')) as fp:
      +            source = fp.read()
      +        pathname = "" % modulename
      +        code_w = ec.compiler.compile(source, pathname, 'exec', 0)
      +        w_mod = add_module(space, space.wrap(modulename))
      +        space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod)
      +        space.setitem(w_mod.w_dict, space.wrap('__name__'), w_mod.w_name)
      +        code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict)
      +        assert check_sys_modules_w(space, modulename)
      +    finally:
      +        lock.release_lock(silent_after_fork=True)
           return w_mod
       
       
      diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py
      --- a/pypy/module/imp/interp_imp.py
      +++ b/pypy/module/imp/interp_imp.py
      @@ -84,7 +84,9 @@
           name = space.str0_w(w_name)
           if name not in space.builtin_modules:
               return
      -    return space.getbuiltinmodule(name)
      +    # force_init is needed to make reload actually reload instead of just
      +    # using the already-present module in sys.modules.
      +    return space.getbuiltinmodule(name, force_init=True)
       
       def init_frozen(space, w_name):
           return None
      diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
      --- a/pypy/module/imp/test/test_import.py
      +++ b/pypy/module/imp/test/test_import.py
      @@ -499,7 +499,7 @@
               # Check relative fails when __package__ set to a non-string
               ns = dict(__package__=object())
               check_absolute()
      -        raises(ValueError, check_relative)
      +        raises(TypeError, check_relative)
       
           def test_import_function(self):
               # More tests for __import__
      @@ -636,7 +636,9 @@
           def test_del_from_sys_modules(self):
               try:
                   import del_sys_module
      -        except ImportError:
      +        #This raises a KeyError in cpython,
      +        #not an import error
      +        except KeyError:
                   pass    # ok
               else:
                   assert False, 'should not work'
      diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py
      --- a/pypy/module/sys/test/test_sysmodule.py
      +++ b/pypy/module/sys/test/test_sysmodule.py
      @@ -32,7 +32,8 @@
               w_sys.flush_std_files(space)
       
               msg = space.bytes_w(space.call_function(w_read))
      -        assert 'Exception IOError' in msg
      +        # IOError has become an alias for OSError
      +        assert 'Exception OSError' in msg
           finally:
               space.setattr(w_sys, space.wrap('stdout'), w_sys.get('__stdout__'))
               space.setattr(w_sys, space.wrap('stderr'), w_sys.get('__stderr__'))
      diff --git a/pypy/module/test_lib_pypy/test_code_module.py b/pypy/module/test_lib_pypy/test_code_module.py
      --- a/pypy/module/test_lib_pypy/test_code_module.py
      +++ b/pypy/module/test_lib_pypy/test_code_module.py
      @@ -21,7 +21,9 @@
       
           def test_cause_tb(self):
               interp = self.get_interp()
      -        interp.runsource('raise IOError from OSError')
      +        # (Arbitrarily) Changing to TypeError as IOError is now an alias of
      +        # OSError, making testing confusing
      +        interp.runsource('raise TypeError from OSError')
               result = interp.out.getvalue()
               expected_header = """OSError
       
      @@ -30,7 +32,7 @@
       Traceback (most recent call last):
       """
               assert expected_header in result
      -        assert result.endswith("IOError\n")
      +        assert result.endswith("TypeError\n")
       
           def test_context_tb(self):
               interp = self.get_interp()
      diff --git a/pypy/module/thread/test/test_import_lock.py b/pypy/module/thread/test/test_import_lock.py
      --- a/pypy/module/thread/test/test_import_lock.py
      +++ b/pypy/module/thread/test/test_import_lock.py
      @@ -96,14 +96,14 @@
                   original_acquire()
               importlock.count = 0
               monkeypatch.setattr(importlock, 'acquire_lock', acquire_lock)
      +        # An already imported module
       
      -        # An already imported module
               importhook(space, 'sys')
               assert importlock.count == 0
               # A new module
      -        importhook(space, '__future__')
      +        importhook(space, "time")
               assert importlock.count == 1
               # Import it again
               previous_count = importlock.count
      -        importhook(space, '__future__')
      +        importhook(space, "time")
               assert importlock.count == previous_count
      
      From pypy.commits at gmail.com  Wed Jan 27 18:25:11 2016
      From: pypy.commits at gmail.com (mjacob)
      Date: Wed, 27 Jan 2016 15:25:11 -0800 (PST)
      Subject: [pypy-commit] pypy py3.3: Redo 88aafcb7c318: hg merge py3k (+ fixes)
      Message-ID: <56a951d7.a3f6c20a.2ed55.2647@mx.google.com>
      
      Author: Manuel Jacob 
      Branch: py3.3
      Changeset: r81987:4fea3e2ca354
      Date: 2016-01-27 21:14 +0100
      http://bitbucket.org/pypy/pypy/changeset/4fea3e2ca354/
      
      Log:	Redo 88aafcb7c318: hg merge py3k (+ fixes)
      
      	That merge (88aafcb7c318) went wrong because someone merged from the
      	default branch directly into the py3.3 branch instead of merging it
      	into the py3k branch and then from the py3k branch into the py3.3
      	branch. This confused mercurial.
      
      	The actual command was: hg merge --config
      	merge.preferancestor=aeafe30eac60 6da866a9e7d5
      
      diff too long, truncating to 2000 out of 102662 lines
      
      diff --git a/.gitignore b/.gitignore
      --- a/.gitignore
      +++ b/.gitignore
      @@ -1,9 +1,14 @@
       .hg
       .svn
       
      +# VIM
      +*.swp
      +*.swo
      +
       *.pyc
       *.pyo
       *~
      +__pycache__/
       
       bin/pypy-c
       include/*.h
      @@ -22,4 +27,6 @@
       pypy/translator/goal/pypy-c
       pypy/translator/goal/target*-c
       release/
      +!pypy/tool/release/
       rpython/_cache/
      +__pycache__/
      diff --git a/.hgtags b/.hgtags
      --- a/.hgtags
      +++ b/.hgtags
      @@ -15,3 +15,5 @@
       e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0
       e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0
       295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0
      +f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1
      +850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0
      diff --git a/LICENSE b/LICENSE
      --- a/LICENSE
      +++ b/LICENSE
      @@ -56,14 +56,15 @@
         Anders Chrigstrom
         Eric van Riet Paap
         Wim Lavrijsen
      +  Richard Plangger
         Richard Emslie
         Alexander Schremmer
         Dan Villiom Podlaski Christiansen
         Lukas Diekmann
         Sven Hager
         Anders Lehmann
      +  Remi Meier
         Aurelien Campeas
      -  Remi Meier
         Niklaus Haldimann
         Camillo Bruni
         Laura Creighton
      @@ -87,7 +88,6 @@
         Ludovic Aubry
         Jacob Hallen
         Jason Creighton
      -  Richard Plangger
         Alex Martelli
         Michal Bendowski
         stian
      @@ -168,7 +168,6 @@
         Michael Twomey
         Lucian Branescu Mihaila
         Yichao Yu
      -  Anton Gulenko
         Gabriel Lavoie
         Olivier Dormond
         Jared Grubb
      @@ -201,9 +200,12 @@
         Alex Perry
         Vincent Legoll
         Alan McIntyre
      +  Spenser Bauman
         Alexander Sedov
         Attila Gobi
         Christopher Pope
      +  Devin Jeanpierre
      +  Vaibhav Sood
         Christian Tismer 
         Marc Abramowitz
         Dan Stromberg
      @@ -215,6 +217,7 @@
         Carl Meyer
         Karl Ramm
         Pieter Zieschang
      +  Anton Gulenko
         Gabriel
         Lukas Vacek
         Andrew Dalke
      @@ -234,6 +237,7 @@
         Lutz Paelike
         Lucio Torre
         Lars Wassermann
      +  Philipp Rustemeuer
         Henrik Vendelbo
         Dan Buch
         Miguel de Val Borro
      @@ -244,14 +248,17 @@
         Martin Blais
         Lene Wagner
         Tomo Cocoa
      +  Kim Jin Su
         Toni Mattis
         Lucas Stadler
         Julian Berman
      +  Markus Holtermann
         roberto at goyle
         Yury V. Zaytsev
         Anna Katrina Dominguez
         William Leslie
         Bobby Impollonia
      +  Faye Zhao
         timo at eistee.fritz.box
         Andrew Thompson
         Yusei Tahara
      @@ -282,6 +289,7 @@
         shoma hosaka
         Daniel Neuhäuser
         Ben Mather
      +  Niclas Olofsson
         halgari
         Boglarka Vezer
         Chris Pressey
      @@ -308,13 +316,16 @@
         Stefan Marr
         jiaaro
         Mads Kiilerich
      +  Richard Lancaster
         opassembler.py
         Antony Lee
      +  Yaroslav Fedevych
         Jim Hunziker
         Markus Unterwaditzer
         Even Wiik Thomassen
         jbs
         squeaky
      +  Zearin
         soareschen
         Kurt Griffiths
         Mike Bayer
      @@ -326,6 +337,7 @@
         Anna Ravencroft
         Andrey Churin
         Dan Crosta
      +  Tobias Diaz
         Julien Phalip
         Roman Podoliaka
         Dan Loewenherz
      @@ -352,8 +364,7 @@
       Except when otherwise stated (look for LICENSE files or copyright/license
       information at the beginning of each file) the files in the 'lib-python/2.7'
       directory are all copyrighted by the Python Software Foundation and licensed
      -under the Python Software License of which you can find a copy here:
      -http://www.python.org/doc/Copyright.html 
      +under the terms that you can find here: https://docs.python.org/2/license.html
       
       License for 'pypy/module/unicodedata/'
       ======================================
      @@ -430,12 +441,12 @@
       _gdbm module, provided in the file lib_pypy/_gdbm.py, is redistributed
       under the terms of the GPL license as well.
       
      -License for 'pypy/module/_vmprof/src'
      +License for 'rpython/rlib/rvmprof/src'
       --------------------------------------
       
       The code is based on gperftools. You may see a copy of the License for it at
       
      -    https://code.google.com/p/gperftools/source/browse/COPYING
      +    https://github.com/gperftools/gperftools/blob/master/COPYING
       
       License for 'liblzma and 'lzmaffi'
       ----------------------------------
      diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py
      --- a/dotviewer/graphclient.py
      +++ b/dotviewer/graphclient.py
      @@ -127,16 +127,8 @@
               return spawn_graphserver_handler((host, port))
       
       def spawn_local_handler():
      -    if hasattr(sys, 'pypy_objspaceclass'):
      -        # if 'python' is actually PyPy, e.g. in a virtualenv, then
      -        # try hard to find a real CPython
      -        try:
      -            python = subprocess.check_output(
      -                'env -i $SHELL -l -c "which python"', shell=True).strip()
      -        except subprocess.CalledProcessError:
      -            # did not work, fall back to 'python'
      -            python = 'python'
      -    else:
      +    python = os.getenv('PYPY_PYGAME_PYTHON')
      +    if not python:
               python = sys.executable
           args = [python, '-u', GRAPHSERVER, '--stdio']
           p = subprocess.Popen(args,
      diff --git a/lib_pypy/_dbm.py b/lib_pypy/_dbm.py
      --- a/lib_pypy/_dbm.py
      +++ b/lib_pypy/_dbm.py
      @@ -153,6 +153,7 @@
       _init_func('store', (c_void_p, datum, datum, c_int), restype=c_int)
       _init_func('error', (c_void_p,), restype=c_int)
       _init_func('delete', (c_void_p, datum), restype=c_int)
      +_init_func('clearerr', (c_void_p,), restype=c_int)
       
       lib.DBM_INSERT = 0
       lib.DBM_REPLACE = 1
      diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py
      --- a/lib_pypy/_tkinter/tklib_build.py
      +++ b/lib_pypy/_tkinter/tklib_build.py
      @@ -213,8 +213,8 @@
       #include 
       #endif 
       
      -char *get_tk_version() { return TK_VERSION; }
      -char *get_tcl_version() { return TCL_VERSION; }
      +char *get_tk_version(void) { return TK_VERSION; }
      +char *get_tcl_version(void) { return TCL_VERSION; }
       """ % globals(),
       include_dirs=incdirs,
       libraries=linklibs,
      diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
      --- a/lib_pypy/cffi.egg-info/PKG-INFO
      +++ b/lib_pypy/cffi.egg-info/PKG-INFO
      @@ -1,6 +1,6 @@
       Metadata-Version: 1.1
       Name: cffi
      -Version: 1.2.1
      +Version: 1.3.1
       Summary: Foreign Function Interface for Python calling C code.
       Home-page: http://cffi.readthedocs.org
       Author: Armin Rigo, Maciej Fijalkowski
      diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
      --- a/lib_pypy/cffi/__init__.py
      +++ b/lib_pypy/cffi/__init__.py
      @@ -4,8 +4,8 @@
       from .api import FFI, CDefError, FFIError
       from .ffiplatform import VerificationError, VerificationMissing
       
      -__version__ = "1.2.1"
      -__version_info__ = (1, 2, 1)
      +__version__ = "1.3.1"
      +__version_info__ = (1, 3, 1)
       
       # The verifier module file names are based on the CRC32 of a string that
       # contains the following version number.  It may be older than __version__
      diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
      --- a/lib_pypy/cffi/_cffi_include.h
      +++ b/lib_pypy/cffi/_cffi_include.h
      @@ -214,6 +214,12 @@
            (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) :    \
            _CFFI__UNKNOWN_PRIM)
       
      +#define _cffi_prim_float(size)                                          \
      +    ((size) == sizeof(float) ? _CFFI_PRIM_FLOAT :                       \
      +     (size) == sizeof(double) ? _CFFI_PRIM_DOUBLE :                     \
      +     (size) == sizeof(long double) ? _CFFI__UNKNOWN_LONG_DOUBLE :       \
      +     _CFFI__UNKNOWN_FLOAT_PRIM)
      +
       #define _cffi_check_int(got, got_nonpos, expected)      \
           ((got_nonpos) == (expected <= 0) &&                 \
            (got) == (unsigned long long)expected)
      diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py
      --- a/lib_pypy/cffi/_pycparser/__init__.py
      +++ b/lib_pypy/cffi/_pycparser/__init__.py
      @@ -4,11 +4,11 @@
       # This package file exports some convenience functions for
       # interacting with pycparser
       #
      -# Copyright (C) 2008-2012, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #-----------------------------------------------------------------
       __all__ = ['c_lexer', 'c_parser', 'c_ast']
      -__version__ = '2.10'
      +__version__ = '2.14'
       
       from subprocess import Popen, PIPE
       from .c_parser import CParser
      @@ -91,4 +91,3 @@
           if parser is None:
               parser = CParser()
           return parser.parse(text, filename)
      -
      diff --git a/lib_pypy/cffi/_pycparser/_ast_gen.py b/lib_pypy/cffi/_pycparser/_ast_gen.py
      --- a/lib_pypy/cffi/_pycparser/_ast_gen.py
      +++ b/lib_pypy/cffi/_pycparser/_ast_gen.py
      @@ -1,13 +1,13 @@
       #-----------------------------------------------------------------
       # _ast_gen.py
       #
      -# Generates the AST Node classes from a specification given in 
      -# a .yaml file
      +# Generates the AST Node classes from a specification given in
      +# a configuration file
       #
       # The design of this module was inspired by astgen.py from the
       # Python 2.5 code-base.
       #
      -# Copyright (C) 2008-2012, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #-----------------------------------------------------------------
       import pprint
      @@ -20,7 +20,7 @@
                   file.
               """
               self.cfg_filename = cfg_filename
      -        self.node_cfg = [NodeCfg(name, contents) 
      +        self.node_cfg = [NodeCfg(name, contents)
                   for (name, contents) in self.parse_cfgfile(cfg_filename)]
       
           def generate(self, file=None):
      @@ -28,11 +28,11 @@
               """
               src = Template(_PROLOGUE_COMMENT).substitute(
                   cfg_filename=self.cfg_filename)
      -        
      +
               src += _PROLOGUE_CODE
               for node_cfg in self.node_cfg:
                   src += node_cfg.generate_source() + '\n\n'
      -        
      +
               file.write(src)
       
           def parse_cfgfile(self, filename):
      @@ -57,10 +57,10 @@
       
       
       class NodeCfg(object):
      -    """ Node configuration. 
      +    """ Node configuration.
       
               name: node name
      -        contents: a list of contents - attributes and child nodes 
      +        contents: a list of contents - attributes and child nodes
               See comment at the top of the configuration file for details.
           """
           def __init__(self, name, contents):
      @@ -73,7 +73,7 @@
               for entry in contents:
                   clean_entry = entry.rstrip('*')
                   self.all_entries.append(clean_entry)
      -            
      +
                   if entry.endswith('**'):
                       self.seq_child.append(clean_entry)
                   elif entry.endswith('*'):
      @@ -86,26 +86,30 @@
               src += '\n' + self._gen_children()
               src += '\n' + self._gen_attr_names()
               return src
      -    
      +
           def _gen_init(self):
               src = "class %s(Node):\n" % self.name
       
               if self.all_entries:
                   args = ', '.join(self.all_entries)
      +            slots = ', '.join("'{0}'".format(e) for e in self.all_entries)
      +            slots += ", 'coord', '__weakref__'"
                   arglist = '(self, %s, coord=None)' % args
               else:
      +            slots = "'coord', '__weakref__'"
                   arglist = '(self, coord=None)'
      -        
      +
      +        src += "    __slots__ = (%s)\n" % slots
               src += "    def __init__%s:\n" % arglist
      -        
      +
               for name in self.all_entries + ['coord']:
                   src += "        self.%s = %s\n" % (name, name)
      -        
      +
               return src
       
           def _gen_children(self):
               src = '    def children(self):\n'
      -        
      +
               if self.all_entries:
                   src += '        nodelist = []\n'
       
      @@ -114,21 +118,21 @@
                           '        if self.%(child)s is not None:' +
                           ' nodelist.append(("%(child)s", self.%(child)s))\n') % (
                               dict(child=child))
      -                
      +
                   for seq_child in self.seq_child:
                       src += (
                           '        for i, child in enumerate(self.%(child)s or []):\n'
                           '            nodelist.append(("%(child)s[%%d]" %% i, child))\n') % (
                               dict(child=seq_child))
      -                    
      +
                   src += '        return tuple(nodelist)\n'
               else:
                   src += '        return ()\n'
      -            
      -        return src        
      +
      +        return src
       
           def _gen_attr_names(self):
      -        src = "    attr_names = (" + ''.join("%r," % nm for nm in self.attr) + ')' 
      +        src = "    attr_names = (" + ''.join("%r, " % nm for nm in self.attr) + ')'
               return src
       
       
      @@ -136,7 +140,7 @@
       r'''#-----------------------------------------------------------------
       # ** ATTENTION **
       # This code was automatically generated from the file:
      -# $cfg_filename 
      +# $cfg_filename
       #
       # Do not modify it directly. Modify the configuration file and
       # run the generator again.
      @@ -146,7 +150,7 @@
       #
       # AST Node classes.
       #
      -# Copyright (C) 2008-2012, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #-----------------------------------------------------------------
       
      @@ -157,6 +161,7 @@
       
       
       class Node(object):
      +    __slots__ = ()
           """ Abstract base class for AST nodes.
           """
           def children(self):
      @@ -167,21 +172,21 @@
           def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
               """ Pretty print the Node and all its attributes and
                   children (recursively) to a buffer.
      -            
      -            buf:   
      +
      +            buf:
                       Open IO buffer into which the Node is printed.
      -            
      -            offset: 
      -                Initial offset (amount of leading spaces) 
      -            
      +
      +            offset:
      +                Initial offset (amount of leading spaces)
      +
                   attrnames:
                       True if you want to see the attribute names in
                       name=value pairs. False to only see the values.
      -                
      +
                   nodenames:
      -                True if you want to see the actual node names 
      +                True if you want to see the actual node names
                       within their parents.
      -            
      +
                   showcoord:
                       Do you want the coordinates of each Node to be
                       displayed.
      @@ -216,47 +221,47 @@
       
       
       class NodeVisitor(object):
      -    """ A base NodeVisitor class for visiting c_ast nodes. 
      +    """ A base NodeVisitor class for visiting c_ast nodes.
               Subclass it and define your own visit_XXX methods, where
      -        XXX is the class name you want to visit with these 
      +        XXX is the class name you want to visit with these
               methods.
      -        
      +
               For example:
      -        
      +
               class ConstantVisitor(NodeVisitor):
                   def __init__(self):
                       self.values = []
      -            
      +
                   def visit_Constant(self, node):
                       self.values.append(node.value)
       
      -        Creates a list of values of all the constant nodes 
      +        Creates a list of values of all the constant nodes
               encountered below the given node. To use it:
      -        
      +
               cv = ConstantVisitor()
               cv.visit(node)
      -        
      +
               Notes:
      -        
      -        *   generic_visit() will be called for AST nodes for which 
      -            no visit_XXX method was defined. 
      -        *   The children of nodes for which a visit_XXX was 
      +
      +        *   generic_visit() will be called for AST nodes for which
      +            no visit_XXX method was defined.
      +        *   The children of nodes for which a visit_XXX was
                   defined will not be visited - if you need this, call
      -            generic_visit() on the node. 
      +            generic_visit() on the node.
                   You can use:
                       NodeVisitor.generic_visit(self, node)
               *   Modeled after Python's own AST visiting facilities
                   (the ast module of Python 3.0)
           """
           def visit(self, node):
      -        """ Visit a node. 
      +        """ Visit a node.
               """
               method = 'visit_' + node.__class__.__name__
               visitor = getattr(self, method, self.generic_visit)
               return visitor(node)
      -        
      +
           def generic_visit(self, node):
      -        """ Called if no explicit visitor function exists for a 
      +        """ Called if no explicit visitor function exists for a
                   node. Implements preorder visiting of the node.
               """
               for c_name, c in node.children():
      diff --git a/lib_pypy/cffi/_pycparser/_build_tables.py b/lib_pypy/cffi/_pycparser/_build_tables.py
      --- a/lib_pypy/cffi/_pycparser/_build_tables.py
      +++ b/lib_pypy/cffi/_pycparser/_build_tables.py
      @@ -6,12 +6,11 @@
       # Also generates AST code from the configuration file.
       # Should be called from the pycparser directory.
       #
      -# Copyright (C) 2008-2012, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #-----------------------------------------------------------------
       
       # Generate c_ast.py
      -#
       from _ast_gen import ASTCodeGenerator
       ast_gen = ASTCodeGenerator('_c_ast.cfg')
       ast_gen.generate(open('c_ast.py', 'w'))
      diff --git a/lib_pypy/cffi/_pycparser/_c_ast.cfg b/lib_pypy/cffi/_pycparser/_c_ast.cfg
      --- a/lib_pypy/cffi/_pycparser/_c_ast.cfg
      +++ b/lib_pypy/cffi/_pycparser/_c_ast.cfg
      @@ -1,188 +1,189 @@
      -#-----------------------------------------------------------------
      -# pycparser: _c_ast_gen.cfg
      -#
      -# Defines the AST Node classes used in pycparser.
      -# 
      -# Each entry is a Node sub-class name, listing the attributes
      -# and child nodes of the class:
      -#   *     - a child node
      -#   **    - a sequence of child nodes
      -#         - an attribute
      -#
      -# Copyright (C) 2008-2012, Eli Bendersky
      -# License: BSD
      -#-----------------------------------------------------------------
      -
      -ArrayDecl: [type*, dim*]
      -
      -ArrayRef: [name*, subscript*]
      -
      -# op: =, +=, /= etc.
      -#
      -Assignment: [op, lvalue*, rvalue*]
      -
      -BinaryOp: [op, left*, right*]
      -
      -Break: []
      -
      -Case: [expr*, stmts**]
      -
      -Cast: [to_type*, expr*]
      -
      -# Compound statement in C99 is a list of block items (declarations or
      -# statements).
      -#
      -Compound: [block_items**]
      -
      -# Compound literal (anonymous aggregate) for C99.
      -# (type-name) {initializer_list}
      -# type: the typename
      -# init: InitList for the initializer list
      -#
      -CompoundLiteral: [type*, init*]
      -
      -# type: int, char, float, etc. see CLexer for constant token types
      -#
      -Constant: [type, value]
      -
      -Continue: []
      -
      -# name: the variable being declared
      -# quals: list of qualifiers (const, volatile)
      -# funcspec: list function specifiers (i.e. inline in C99)
      -# storage: list of storage specifiers (extern, register, etc.)
      -# type: declaration type (probably nested with all the modifiers)
      -# init: initialization value, or None
      -# bitsize: bit field size, or None
      -#
      -Decl: [name, quals, storage, funcspec, type*, init*, bitsize*]
      -
      -DeclList: [decls**]
      -
      -Default: [stmts**]
      -
      -DoWhile: [cond*, stmt*]
      -
      -# Represents the ellipsis (...) parameter in a function 
      -# declaration
      -#
      -EllipsisParam: []
      -
      -# An empty statement (a semicolon ';' on its own)
      -#
      -EmptyStatement: []
      -
      -# Enumeration type specifier
      -# name: an optional ID
      -# values: an EnumeratorList
      -#
      -Enum: [name, values*]
      -
      -# A name/value pair for enumeration values
      -#
      -Enumerator: [name, value*]
      -
      -# A list of enumerators
      -#
      -EnumeratorList: [enumerators**]
      -
      -# A list of expressions separated by the comma operator.
      -#
      -ExprList: [exprs**]
      -
      -# This is the top of the AST, representing a single C file (a 
      -# translation unit in K&R jargon). It contains a list of 
      -# "external-declaration"s, which is either declarations (Decl),
      -# Typedef or function definitions (FuncDef).
      -# 
      -FileAST: [ext**]
      -
      -# for (init; cond; next) stmt
      -#
      -For: [init*, cond*, next*, stmt*]
      -
      -# name: Id
      -# args: ExprList
      -#
      -FuncCall: [name*, args*]
      -
      -# type (args)
      -#
      -FuncDecl: [args*, type*]
      -
      -# Function definition: a declarator for the function name and
      -# a body, which is a compound statement. 
      -# There's an optional list of parameter declarations for old
      -# K&R-style definitions
      -#
      -FuncDef: [decl*, param_decls**, body*]
      -
      -Goto: [name]
      -
      -ID: [name]
      -
      -# Holder for types that are a simple identifier (e.g. the built
      -# ins void, char etc. and typedef-defined types)
      -#
      -IdentifierType: [names]
      -
      -If: [cond*, iftrue*, iffalse*]
      -
      -# An initialization list used for compound literals.
      -#
      -InitList: [exprs**]
      -
      -Label: [name, stmt*]
      -
      -# A named initializer for C99.
      -# The name of a NamedInitializer is a sequence of Nodes, because
      -# names can be hierarchical and contain constant expressions.
      -#
      -NamedInitializer: [name**, expr*]
      -
      -# a list of comma separated function parameter declarations
      -#
      -ParamList: [params**]
      -
      -PtrDecl: [quals, type*]
      -
      -Return: [expr*]
      -
      -# name: struct tag name
      -# decls: declaration of members
      -#
      -Struct: [name, decls**]
      -
      -# type: . or ->
      -# name.field or name->field
      -#
      -StructRef: [name*, type, field*]
      -
      -Switch: [cond*, stmt*]
      -
      -# cond ? iftrue : iffalse
      -#
      -TernaryOp: [cond*, iftrue*, iffalse*]
      -
      -# A base type declaration
      -#
      -TypeDecl: [declname, quals, type*]
      -
      -# A typedef declaration.
      -# Very similar to Decl, but without some attributes
      -#
      -Typedef: [name, quals, storage, type*]
      -
      -Typename: [quals, type*]
      -
      -UnaryOp: [op, expr*]
      -
      -# name: union tag name
      -# decls: declaration of members
      -#
      -Union: [name, decls**]
      -
      -While: [cond*, stmt*]
      -
      -
      -
      +#-----------------------------------------------------------------
      +# pycparser: _c_ast.cfg
      +#
      +# Defines the AST Node classes used in pycparser.
      +#
      +# Each entry is a Node sub-class name, listing the attributes
      +# and child nodes of the class:
      +#   *     - a child node
      +#   **    - a sequence of child nodes
      +#         - an attribute
      +#
      +# Copyright (C) 2008-2015, Eli Bendersky
      +# License: BSD
      +#-----------------------------------------------------------------
      +
      +# ArrayDecl is a nested declaration of an array with the given type.
      +# dim: the dimension (for example, constant 42)
      +# dim_quals: list of dimension qualifiers, to support C99's allowing 'const'
      +#            and 'static' within the array dimension in function declarations.
      +ArrayDecl: [type*, dim*, dim_quals]
      +
      +ArrayRef: [name*, subscript*]
      +
      +# op: =, +=, /= etc.
      +#
      +Assignment: [op, lvalue*, rvalue*]
      +
      +BinaryOp: [op, left*, right*]
      +
      +Break: []
      +
      +Case: [expr*, stmts**]
      +
      +Cast: [to_type*, expr*]
      +
      +# Compound statement in C99 is a list of block items (declarations or
      +# statements).
      +#
      +Compound: [block_items**]
      +
      +# Compound literal (anonymous aggregate) for C99.
      +# (type-name) {initializer_list}
      +# type: the typename
      +# init: InitList for the initializer list
      +#
      +CompoundLiteral: [type*, init*]
      +
      +# type: int, char, float, etc. see CLexer for constant token types
      +#
      +Constant: [type, value]
      +
      +Continue: []
      +
      +# name: the variable being declared
      +# quals: list of qualifiers (const, volatile)
      +# funcspec: list function specifiers (i.e. inline in C99)
      +# storage: list of storage specifiers (extern, register, etc.)
      +# type: declaration type (probably nested with all the modifiers)
      +# init: initialization value, or None
      +# bitsize: bit field size, or None
      +#
      +Decl: [name, quals, storage, funcspec, type*, init*, bitsize*]
      +
      +DeclList: [decls**]
      +
      +Default: [stmts**]
      +
      +DoWhile: [cond*, stmt*]
      +
      +# Represents the ellipsis (...) parameter in a function
      +# declaration
      +#
      +EllipsisParam: []
      +
      +# An empty statement (a semicolon ';' on its own)
      +#
      +EmptyStatement: []
      +
      +# Enumeration type specifier
      +# name: an optional ID
      +# values: an EnumeratorList
      +#
      +Enum: [name, values*]
      +
      +# A name/value pair for enumeration values
      +#
      +Enumerator: [name, value*]
      +
      +# A list of enumerators
      +#
      +EnumeratorList: [enumerators**]
      +
      +# A list of expressions separated by the comma operator.
      +#
      +ExprList: [exprs**]
      +
      +# This is the top of the AST, representing a single C file (a
      +# translation unit in K&R jargon). It contains a list of
      +# "external-declaration"s, which is either declarations (Decl),
      +# Typedef or function definitions (FuncDef).
      +#
      +FileAST: [ext**]
      +
      +# for (init; cond; next) stmt
      +#
      +For: [init*, cond*, next*, stmt*]
      +
      +# name: Id
      +# args: ExprList
      +#
      +FuncCall: [name*, args*]
      +
      +# type (args)
      +#
      +FuncDecl: [args*, type*]
      +
      +# Function definition: a declarator for the function name and
      +# a body, which is a compound statement.
      +# There's an optional list of parameter declarations for old
      +# K&R-style definitions
      +#
      +FuncDef: [decl*, param_decls**, body*]
      +
      +Goto: [name]
      +
      +ID: [name]
      +
      +# Holder for types that are a simple identifier (e.g. the built
      +# ins void, char etc. and typedef-defined types)
      +#
      +IdentifierType: [names]
      +
      +If: [cond*, iftrue*, iffalse*]
      +
      +# An initialization list used for compound literals.
      +#
      +InitList: [exprs**]
      +
      +Label: [name, stmt*]
      +
      +# A named initializer for C99.
      +# The name of a NamedInitializer is a sequence of Nodes, because
      +# names can be hierarchical and contain constant expressions.
      +#
      +NamedInitializer: [name**, expr*]
      +
      +# a list of comma separated function parameter declarations
      +#
      +ParamList: [params**]
      +
      +PtrDecl: [quals, type*]
      +
      +Return: [expr*]
      +
      +# name: struct tag name
      +# decls: declaration of members
      +#
      +Struct: [name, decls**]
      +
      +# type: . or ->
      +# name.field or name->field
      +#
      +StructRef: [name*, type, field*]
      +
      +Switch: [cond*, stmt*]
      +
      +# cond ? iftrue : iffalse
      +#
      +TernaryOp: [cond*, iftrue*, iffalse*]
      +
      +# A base type declaration
      +#
      +TypeDecl: [declname, quals, type*]
      +
      +# A typedef declaration.
      +# Very similar to Decl, but without some attributes
      +#
      +Typedef: [name, quals, storage, type*]
      +
      +Typename: [name, quals, type*]
      +
      +UnaryOp: [op, expr*]
      +
      +# name: union tag name
      +# decls: declaration of members
      +#
      +Union: [name, decls**]
      +
      +While: [cond*, stmt*]
      diff --git a/lib_pypy/cffi/_pycparser/ast_transforms.py b/lib_pypy/cffi/_pycparser/ast_transforms.py
      --- a/lib_pypy/cffi/_pycparser/ast_transforms.py
      +++ b/lib_pypy/cffi/_pycparser/ast_transforms.py
      @@ -3,7 +3,7 @@
       #
       # Some utilities used by the parser to create a friendlier AST.
       #
      -# Copyright (C) 2008-2012, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #------------------------------------------------------------------------------
       
      @@ -84,7 +84,7 @@
                   _extract_nested_case(child, new_compound.block_items)
                   last_case = new_compound.block_items[-1]
               else:
      -            # Other statements are added as childrent to the last case, if it
      +            # Other statements are added as children to the last case, if it
                   # exists.
                   if last_case is None:
                       new_compound.block_items.append(child)
      diff --git a/lib_pypy/cffi/_pycparser/c_ast.py b/lib_pypy/cffi/_pycparser/c_ast.py
      --- a/lib_pypy/cffi/_pycparser/c_ast.py
      +++ b/lib_pypy/cffi/_pycparser/c_ast.py
      @@ -1,7 +1,7 @@
       #-----------------------------------------------------------------
       # ** ATTENTION **
       # This code was automatically generated from the file:
      -# _c_ast.cfg 
      +# _c_ast.cfg
       #
       # Do not modify it directly. Modify the configuration file and
       # run the generator again.
      @@ -11,7 +11,7 @@
       #
       # AST Node classes.
       #
      -# Copyright (C) 2008-2012, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #-----------------------------------------------------------------
       
      @@ -20,6 +20,7 @@
       
       
       class Node(object):
      +    __slots__ = ()
           """ Abstract base class for AST nodes.
           """
           def children(self):
      @@ -30,21 +31,21 @@
           def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
               """ Pretty print the Node and all its attributes and
                   children (recursively) to a buffer.
      -            
      -            buf:   
      +
      +            buf:
                       Open IO buffer into which the Node is printed.
      -            
      -            offset: 
      -                Initial offset (amount of leading spaces) 
      -            
      +
      +            offset:
      +                Initial offset (amount of leading spaces)
      +
                   attrnames:
                       True if you want to see the attribute names in
                       name=value pairs. False to only see the values.
      -                
      +
                   nodenames:
      -                True if you want to see the actual node names 
      +                True if you want to see the actual node names
                       within their parents.
      -            
      +
                   showcoord:
                       Do you want the coordinates of each Node to be
                       displayed.
      @@ -79,47 +80,47 @@
       
       
       class NodeVisitor(object):
      -    """ A base NodeVisitor class for visiting c_ast nodes. 
      +    """ A base NodeVisitor class for visiting c_ast nodes.
               Subclass it and define your own visit_XXX methods, where
      -        XXX is the class name you want to visit with these 
      +        XXX is the class name you want to visit with these
               methods.
      -        
      +
               For example:
      -        
      +
               class ConstantVisitor(NodeVisitor):
                   def __init__(self):
                       self.values = []
      -            
      +
                   def visit_Constant(self, node):
                       self.values.append(node.value)
       
      -        Creates a list of values of all the constant nodes 
      +        Creates a list of values of all the constant nodes
               encountered below the given node. To use it:
      -        
      +
               cv = ConstantVisitor()
               cv.visit(node)
      -        
      +
               Notes:
      -        
      -        *   generic_visit() will be called for AST nodes for which 
      -            no visit_XXX method was defined. 
      -        *   The children of nodes for which a visit_XXX was 
      +
      +        *   generic_visit() will be called for AST nodes for which
      +            no visit_XXX method was defined.
      +        *   The children of nodes for which a visit_XXX was
                   defined will not be visited - if you need this, call
      -            generic_visit() on the node. 
      +            generic_visit() on the node.
                   You can use:
                       NodeVisitor.generic_visit(self, node)
               *   Modeled after Python's own AST visiting facilities
                   (the ast module of Python 3.0)
           """
           def visit(self, node):
      -        """ Visit a node. 
      +        """ Visit a node.
               """
               method = 'visit_' + node.__class__.__name__
               visitor = getattr(self, method, self.generic_visit)
               return visitor(node)
      -        
      +
           def generic_visit(self, node):
      -        """ Called if no explicit visitor function exists for a 
      +        """ Called if no explicit visitor function exists for a
                   node. Implements preorder visiting of the node.
               """
               for c_name, c in node.children():
      @@ -127,9 +128,11 @@
       
       
       class ArrayDecl(Node):
      -    def __init__(self, type, dim, coord=None):
      +    __slots__ = ('type', 'dim', 'dim_quals', 'coord', '__weakref__')
      +    def __init__(self, type, dim, dim_quals, coord=None):
               self.type = type
               self.dim = dim
      +        self.dim_quals = dim_quals
               self.coord = coord
       
           def children(self):
      @@ -138,9 +141,10 @@
               if self.dim is not None: nodelist.append(("dim", self.dim))
               return tuple(nodelist)
       
      -    attr_names = ()
      +    attr_names = ('dim_quals', )
       
       class ArrayRef(Node):
      +    __slots__ = ('name', 'subscript', 'coord', '__weakref__')
           def __init__(self, name, subscript, coord=None):
               self.name = name
               self.subscript = subscript
      @@ -155,6 +159,7 @@
           attr_names = ()
       
       class Assignment(Node):
      +    __slots__ = ('op', 'lvalue', 'rvalue', 'coord', '__weakref__')
           def __init__(self, op, lvalue, rvalue, coord=None):
               self.op = op
               self.lvalue = lvalue
      @@ -167,9 +172,10 @@
               if self.rvalue is not None: nodelist.append(("rvalue", self.rvalue))
               return tuple(nodelist)
       
      -    attr_names = ('op',)
      +    attr_names = ('op', )
       
       class BinaryOp(Node):
      +    __slots__ = ('op', 'left', 'right', 'coord', '__weakref__')
           def __init__(self, op, left, right, coord=None):
               self.op = op
               self.left = left
      @@ -182,9 +188,10 @@
               if self.right is not None: nodelist.append(("right", self.right))
               return tuple(nodelist)
       
      -    attr_names = ('op',)
      +    attr_names = ('op', )
       
       class Break(Node):
      +    __slots__ = ('coord', '__weakref__')
           def __init__(self, coord=None):
               self.coord = coord
       
      @@ -194,6 +201,7 @@
           attr_names = ()
       
       class Case(Node):
      +    __slots__ = ('expr', 'stmts', 'coord', '__weakref__')
           def __init__(self, expr, stmts, coord=None):
               self.expr = expr
               self.stmts = stmts
      @@ -209,6 +217,7 @@
           attr_names = ()
       
       class Cast(Node):
      +    __slots__ = ('to_type', 'expr', 'coord', '__weakref__')
           def __init__(self, to_type, expr, coord=None):
               self.to_type = to_type
               self.expr = expr
      @@ -223,6 +232,7 @@
           attr_names = ()
       
       class Compound(Node):
      +    __slots__ = ('block_items', 'coord', '__weakref__')
           def __init__(self, block_items, coord=None):
               self.block_items = block_items
               self.coord = coord
      @@ -236,6 +246,7 @@
           attr_names = ()
       
       class CompoundLiteral(Node):
      +    __slots__ = ('type', 'init', 'coord', '__weakref__')
           def __init__(self, type, init, coord=None):
               self.type = type
               self.init = init
      @@ -250,6 +261,7 @@
           attr_names = ()
       
       class Constant(Node):
      +    __slots__ = ('type', 'value', 'coord', '__weakref__')
           def __init__(self, type, value, coord=None):
               self.type = type
               self.value = value
      @@ -259,9 +271,10 @@
               nodelist = []
               return tuple(nodelist)
       
      -    attr_names = ('type','value',)
      +    attr_names = ('type', 'value', )
       
       class Continue(Node):
      +    __slots__ = ('coord', '__weakref__')
           def __init__(self, coord=None):
               self.coord = coord
       
      @@ -271,6 +284,7 @@
           attr_names = ()
       
       class Decl(Node):
      +    __slots__ = ('name', 'quals', 'storage', 'funcspec', 'type', 'init', 'bitsize', 'coord', '__weakref__')
           def __init__(self, name, quals, storage, funcspec, type, init, bitsize, coord=None):
               self.name = name
               self.quals = quals
      @@ -288,9 +302,10 @@
               if self.bitsize is not None: nodelist.append(("bitsize", self.bitsize))
               return tuple(nodelist)
       
      -    attr_names = ('name','quals','storage','funcspec',)
      +    attr_names = ('name', 'quals', 'storage', 'funcspec', )
       
       class DeclList(Node):
      +    __slots__ = ('decls', 'coord', '__weakref__')
           def __init__(self, decls, coord=None):
               self.decls = decls
               self.coord = coord
      @@ -304,6 +319,7 @@
           attr_names = ()
       
       class Default(Node):
      +    __slots__ = ('stmts', 'coord', '__weakref__')
           def __init__(self, stmts, coord=None):
               self.stmts = stmts
               self.coord = coord
      @@ -317,6 +333,7 @@
           attr_names = ()
       
       class DoWhile(Node):
      +    __slots__ = ('cond', 'stmt', 'coord', '__weakref__')
           def __init__(self, cond, stmt, coord=None):
               self.cond = cond
               self.stmt = stmt
      @@ -331,6 +348,7 @@
           attr_names = ()
       
       class EllipsisParam(Node):
      +    __slots__ = ('coord', '__weakref__')
           def __init__(self, coord=None):
               self.coord = coord
       
      @@ -340,6 +358,7 @@
           attr_names = ()
       
       class EmptyStatement(Node):
      +    __slots__ = ('coord', '__weakref__')
           def __init__(self, coord=None):
               self.coord = coord
       
      @@ -349,6 +368,7 @@
           attr_names = ()
       
       class Enum(Node):
      +    __slots__ = ('name', 'values', 'coord', '__weakref__')
           def __init__(self, name, values, coord=None):
               self.name = name
               self.values = values
      @@ -359,9 +379,10 @@
               if self.values is not None: nodelist.append(("values", self.values))
               return tuple(nodelist)
       
      -    attr_names = ('name',)
      +    attr_names = ('name', )
       
       class Enumerator(Node):
      +    __slots__ = ('name', 'value', 'coord', '__weakref__')
           def __init__(self, name, value, coord=None):
               self.name = name
               self.value = value
      @@ -372,9 +393,10 @@
               if self.value is not None: nodelist.append(("value", self.value))
               return tuple(nodelist)
       
      -    attr_names = ('name',)
      +    attr_names = ('name', )
       
       class EnumeratorList(Node):
      +    __slots__ = ('enumerators', 'coord', '__weakref__')
           def __init__(self, enumerators, coord=None):
               self.enumerators = enumerators
               self.coord = coord
      @@ -388,6 +410,7 @@
           attr_names = ()
       
       class ExprList(Node):
      +    __slots__ = ('exprs', 'coord', '__weakref__')
           def __init__(self, exprs, coord=None):
               self.exprs = exprs
               self.coord = coord
      @@ -401,6 +424,7 @@
           attr_names = ()
       
       class FileAST(Node):
      +    __slots__ = ('ext', 'coord', '__weakref__')
           def __init__(self, ext, coord=None):
               self.ext = ext
               self.coord = coord
      @@ -414,6 +438,7 @@
           attr_names = ()
       
       class For(Node):
      +    __slots__ = ('init', 'cond', 'next', 'stmt', 'coord', '__weakref__')
           def __init__(self, init, cond, next, stmt, coord=None):
               self.init = init
               self.cond = cond
      @@ -432,6 +457,7 @@
           attr_names = ()
       
       class FuncCall(Node):
      +    __slots__ = ('name', 'args', 'coord', '__weakref__')
           def __init__(self, name, args, coord=None):
               self.name = name
               self.args = args
      @@ -446,6 +472,7 @@
           attr_names = ()
       
       class FuncDecl(Node):
      +    __slots__ = ('args', 'type', 'coord', '__weakref__')
           def __init__(self, args, type, coord=None):
               self.args = args
               self.type = type
      @@ -460,6 +487,7 @@
           attr_names = ()
       
       class FuncDef(Node):
      +    __slots__ = ('decl', 'param_decls', 'body', 'coord', '__weakref__')
           def __init__(self, decl, param_decls, body, coord=None):
               self.decl = decl
               self.param_decls = param_decls
      @@ -477,6 +505,7 @@
           attr_names = ()
       
       class Goto(Node):
      +    __slots__ = ('name', 'coord', '__weakref__')
           def __init__(self, name, coord=None):
               self.name = name
               self.coord = coord
      @@ -485,9 +514,10 @@
               nodelist = []
               return tuple(nodelist)
       
      -    attr_names = ('name',)
      +    attr_names = ('name', )
       
       class ID(Node):
      +    __slots__ = ('name', 'coord', '__weakref__')
           def __init__(self, name, coord=None):
               self.name = name
               self.coord = coord
      @@ -496,9 +526,10 @@
               nodelist = []
               return tuple(nodelist)
       
      -    attr_names = ('name',)
      +    attr_names = ('name', )
       
       class IdentifierType(Node):
      +    __slots__ = ('names', 'coord', '__weakref__')
           def __init__(self, names, coord=None):
               self.names = names
               self.coord = coord
      @@ -507,9 +538,10 @@
               nodelist = []
               return tuple(nodelist)
       
      -    attr_names = ('names',)
      +    attr_names = ('names', )
       
       class If(Node):
      +    __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__')
           def __init__(self, cond, iftrue, iffalse, coord=None):
               self.cond = cond
               self.iftrue = iftrue
      @@ -526,6 +558,7 @@
           attr_names = ()
       
       class InitList(Node):
      +    __slots__ = ('exprs', 'coord', '__weakref__')
           def __init__(self, exprs, coord=None):
               self.exprs = exprs
               self.coord = coord
      @@ -539,6 +572,7 @@
           attr_names = ()
       
       class Label(Node):
      +    __slots__ = ('name', 'stmt', 'coord', '__weakref__')
           def __init__(self, name, stmt, coord=None):
               self.name = name
               self.stmt = stmt
      @@ -549,9 +583,10 @@
               if self.stmt is not None: nodelist.append(("stmt", self.stmt))
               return tuple(nodelist)
       
      -    attr_names = ('name',)
      +    attr_names = ('name', )
       
       class NamedInitializer(Node):
      +    __slots__ = ('name', 'expr', 'coord', '__weakref__')
           def __init__(self, name, expr, coord=None):
               self.name = name
               self.expr = expr
      @@ -567,6 +602,7 @@
           attr_names = ()
       
       class ParamList(Node):
      +    __slots__ = ('params', 'coord', '__weakref__')
           def __init__(self, params, coord=None):
               self.params = params
               self.coord = coord
      @@ -580,6 +616,7 @@
           attr_names = ()
       
       class PtrDecl(Node):
      +    __slots__ = ('quals', 'type', 'coord', '__weakref__')
           def __init__(self, quals, type, coord=None):
               self.quals = quals
               self.type = type
      @@ -590,9 +627,10 @@
               if self.type is not None: nodelist.append(("type", self.type))
               return tuple(nodelist)
       
      -    attr_names = ('quals',)
      +    attr_names = ('quals', )
       
       class Return(Node):
      +    __slots__ = ('expr', 'coord', '__weakref__')
           def __init__(self, expr, coord=None):
               self.expr = expr
               self.coord = coord
      @@ -605,6 +643,7 @@
           attr_names = ()
       
       class Struct(Node):
      +    __slots__ = ('name', 'decls', 'coord', '__weakref__')
           def __init__(self, name, decls, coord=None):
               self.name = name
               self.decls = decls
      @@ -616,9 +655,10 @@
                   nodelist.append(("decls[%d]" % i, child))
               return tuple(nodelist)
       
      -    attr_names = ('name',)
      +    attr_names = ('name', )
       
       class StructRef(Node):
      +    __slots__ = ('name', 'type', 'field', 'coord', '__weakref__')
           def __init__(self, name, type, field, coord=None):
               self.name = name
               self.type = type
      @@ -631,9 +671,10 @@
               if self.field is not None: nodelist.append(("field", self.field))
               return tuple(nodelist)
       
      -    attr_names = ('type',)
      +    attr_names = ('type', )
       
       class Switch(Node):
      +    __slots__ = ('cond', 'stmt', 'coord', '__weakref__')
           def __init__(self, cond, stmt, coord=None):
               self.cond = cond
               self.stmt = stmt
      @@ -648,6 +689,7 @@
           attr_names = ()
       
       class TernaryOp(Node):
      +    __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__')
           def __init__(self, cond, iftrue, iffalse, coord=None):
               self.cond = cond
               self.iftrue = iftrue
      @@ -664,6 +706,7 @@
           attr_names = ()
       
       class TypeDecl(Node):
      +    __slots__ = ('declname', 'quals', 'type', 'coord', '__weakref__')
           def __init__(self, declname, quals, type, coord=None):
               self.declname = declname
               self.quals = quals
      @@ -675,9 +718,10 @@
               if self.type is not None: nodelist.append(("type", self.type))
               return tuple(nodelist)
       
      -    attr_names = ('declname','quals',)
      +    attr_names = ('declname', 'quals', )
       
       class Typedef(Node):
      +    __slots__ = ('name', 'quals', 'storage', 'type', 'coord', '__weakref__')
           def __init__(self, name, quals, storage, type, coord=None):
               self.name = name
               self.quals = quals
      @@ -690,10 +734,12 @@
               if self.type is not None: nodelist.append(("type", self.type))
               return tuple(nodelist)
       
      -    attr_names = ('name','quals','storage',)
      +    attr_names = ('name', 'quals', 'storage', )
       
       class Typename(Node):
      -    def __init__(self, quals, type, coord=None):
      +    __slots__ = ('name', 'quals', 'type', 'coord', '__weakref__')
      +    def __init__(self, name, quals, type, coord=None):
      +        self.name = name
               self.quals = quals
               self.type = type
               self.coord = coord
      @@ -703,9 +749,10 @@
               if self.type is not None: nodelist.append(("type", self.type))
               return tuple(nodelist)
       
      -    attr_names = ('quals',)
      +    attr_names = ('name', 'quals', )
       
       class UnaryOp(Node):
      +    __slots__ = ('op', 'expr', 'coord', '__weakref__')
           def __init__(self, op, expr, coord=None):
               self.op = op
               self.expr = expr
      @@ -716,9 +763,10 @@
               if self.expr is not None: nodelist.append(("expr", self.expr))
               return tuple(nodelist)
       
      -    attr_names = ('op',)
      +    attr_names = ('op', )
       
       class Union(Node):
      +    __slots__ = ('name', 'decls', 'coord', '__weakref__')
           def __init__(self, name, decls, coord=None):
               self.name = name
               self.decls = decls
      @@ -730,9 +778,10 @@
                   nodelist.append(("decls[%d]" % i, child))
               return tuple(nodelist)
       
      -    attr_names = ('name',)
      +    attr_names = ('name', )
       
       class While(Node):
      +    __slots__ = ('cond', 'stmt', 'coord', '__weakref__')
           def __init__(self, cond, stmt, coord=None):
               self.cond = cond
               self.stmt = stmt
      diff --git a/lib_pypy/cffi/_pycparser/c_generator.py b/lib_pypy/cffi/_pycparser/c_generator.py
      --- a/lib_pypy/cffi/_pycparser/c_generator.py
      +++ b/lib_pypy/cffi/_pycparser/c_generator.py
      @@ -3,7 +3,7 @@
       #
       # C code generator from pycparser AST nodes.
       #
      -# Copyright (C) 2008-2012, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #------------------------------------------------------------------------------
       from . import c_ast
      @@ -15,8 +15,6 @@
               generic_visit.
           """
           def __init__(self):
      -        self.output = ''
      -
               # Statements start with indentation of self.indent_level spaces, using
               # the _make_indent method
               #
      @@ -34,7 +32,7 @@
               if node is None:
                   return ''
               else:
      -            return ''.join(self.visit(c) for c in node.children())
      +            return ''.join(self.visit(c) for c_name, c in node.children())
       
           def visit_Constant(self, n):
               return n.value
      @@ -83,19 +81,22 @@
           def visit_IdentifierType(self, n):
               return ' '.join(n.names)
       
      +    def _visit_expr(self, n):
      +        if isinstance(n, c_ast.InitList):
      +            return '{' + self.visit(n) + '}'
      +        elif isinstance(n, c_ast.ExprList):
      +            return '(' + self.visit(n) + ')'
      +        else:
      +            return self.visit(n)
      +
           def visit_Decl(self, n, no_type=False):
               # no_type is used when a Decl is part of a DeclList, where the type is
      -        # explicitly only for the first delaration in a list.
      +        # explicitly only for the first declaration in a list.
               #
               s = n.name if no_type else self._generate_decl(n)
               if n.bitsize: s += ' : ' + self.visit(n.bitsize)
               if n.init:
      -            if isinstance(n.init, c_ast.InitList):
      -                s += ' = {' + self.visit(n.init) + '}'
      -            elif isinstance(n.init, c_ast.ExprList):
      -                s += ' = (' + self.visit(n.init) + ')'
      -            else:
      -                s += ' = ' + self.visit(n.init)
      +            s += ' = ' + self._visit_expr(n.init)
               return s
       
           def visit_DeclList(self, n):
      @@ -118,21 +119,13 @@
           def visit_ExprList(self, n):
               visited_subexprs = []
               for expr in n.exprs:
      -            if isinstance(expr, c_ast.ExprList):
      -                visited_subexprs.append('{' + self.visit(expr) + '}')
      -            else:
      -                visited_subexprs.append(self.visit(expr))
      +            visited_subexprs.append(self._visit_expr(expr))
               return ', '.join(visited_subexprs)
       
           def visit_InitList(self, n):
               visited_subexprs = []
               for expr in n.exprs:
      -            if isinstance(expr, c_ast.ExprList):
      -                visited_subexprs.append('(' + self.visit(expr) + ')')
      -            elif isinstance(expr, c_ast.InitList):
      -                visited_subexprs.append('{' + self.visit(expr) + '}')
      -            else:
      -                visited_subexprs.append(self.visit(expr))
      +            visited_subexprs.append(self._visit_expr(expr))
               return ', '.join(visited_subexprs)
       
           def visit_Enum(self, n):
      @@ -195,9 +188,9 @@
               return 'continue;'
       
           def visit_TernaryOp(self, n):
      -        s = self.visit(n.cond) + ' ? '
      -        s += self.visit(n.iftrue) + ' : '
      -        s += self.visit(n.iffalse)
      +        s = self._visit_expr(n.cond) + ' ? '
      +        s += self._visit_expr(n.iftrue) + ' : '
      +        s += self._visit_expr(n.iffalse)
               return s
       
           def visit_If(self, n):
      @@ -281,6 +274,9 @@
               s += ' = ' + self.visit(n.expr)
               return s
       
      +    def visit_FuncDecl(self, n):
      +        return self._generate_type(n)
      +
           def _generate_struct_union(self, n, name):
               """ Generates code for structs and unions. name should be either
                   'struct' or union.
      @@ -384,7 +380,7 @@
               """ Visits 'n' and returns its string representation, parenthesized
                   if the condition function applied to the node returns True.
               """
      -        s = self.visit(n)
      +        s = self._visit_expr(n)
               if condition(n):
                   return '(' + s + ')'
               else:
      @@ -401,5 +397,3 @@
               """
               return isinstance(n,(   c_ast.Constant, c_ast.ID, c_ast.ArrayRef,
                                       c_ast.StructRef, c_ast.FuncCall))
      -
      -
      diff --git a/lib_pypy/cffi/_pycparser/c_lexer.py b/lib_pypy/cffi/_pycparser/c_lexer.py
      --- a/lib_pypy/cffi/_pycparser/c_lexer.py
      +++ b/lib_pypy/cffi/_pycparser/c_lexer.py
      @@ -3,7 +3,7 @@
       #
       # CLexer class: lexer for the C language
       #
      -# Copyright (C) 2008-2013, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #------------------------------------------------------------------------------
       import re
      @@ -102,7 +102,8 @@
           keywords = (
               '_BOOL', '_COMPLEX', 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST',
               'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN',
      -        'FLOAT', 'FOR', 'GOTO', 'IF', 'INLINE', 'INT', 'LONG', 'REGISTER',
      +        'FLOAT', 'FOR', 'GOTO', 'IF', 'INLINE', 'INT', 'LONG', 
      +        'REGISTER', 'OFFSETOF',
               'RESTRICT', 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT',
               'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID',
               'VOLATILE', 'WHILE',
      @@ -129,7 +130,7 @@
               'TYPEID',
       
               # constants
      -        'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX',
      +        'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN',
               'FLOAT_CONST', 'HEX_FLOAT_CONST',
               'CHAR_CONST',
               'WCHAR_CONST',
      @@ -183,12 +184,15 @@
       
           hex_prefix = '0[xX]'
           hex_digits = '[0-9a-fA-F]+'
      +    bin_prefix = '0[bB]'
      +    bin_digits = '[01]+'
       
           # integer constants (K&R2: A.2.5.1)
           integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?'
           decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')'
           octal_constant = '0[0-7]*'+integer_suffix_opt
           hex_constant = hex_prefix+hex_digits+integer_suffix_opt
      +    bin_constant = bin_prefix+bin_digits+integer_suffix_opt
       
           bad_octal_constant = '0[0-7]*[89]'
       
      @@ -302,7 +306,7 @@
               r'pragma'
               pass
       
      -    t_pppragma_ignore = ' \t<>.-{}();+-*/$%@&^~!?:,0123456789'
      +    t_pppragma_ignore = ' \t<>.-{}();=+-*/$%@&^~!?:,0123456789'
       
           @TOKEN(string_literal)
           def t_pppragma_STR(self, t): pass
      @@ -419,6 +423,10 @@
           def t_INT_CONST_HEX(self, t):
               return t
       
      +    @TOKEN(bin_constant)
      +    def t_INT_CONST_BIN(self, t):
      +        return t
      +
           @TOKEN(bad_octal_constant)
           def t_BAD_CONST_OCT(self, t):
               msg = "Invalid octal constant"
      diff --git a/lib_pypy/cffi/_pycparser/c_parser.py b/lib_pypy/cffi/_pycparser/c_parser.py
      --- a/lib_pypy/cffi/_pycparser/c_parser.py
      +++ b/lib_pypy/cffi/_pycparser/c_parser.py
      @@ -3,7 +3,7 @@
       #
       # CParser class: Parser and AST builder for the C language
       #
      -# Copyright (C) 2008-2013, Eli Bendersky
      +# Copyright (C) 2008-2015, Eli Bendersky
       # License: BSD
       #------------------------------------------------------------------------------
       import re
      @@ -23,7 +23,8 @@
                   lextab='cffi._pycparser.lextab',
                   yacc_optimize=True,
                   yacctab='cffi._pycparser.yacctab',
      -            yacc_debug=False):
      +            yacc_debug=False,
      +            taboutputdir=''):
               """ Create a new CParser.
       
                   Some arguments for controlling the debug/optimization
      @@ -64,6 +65,10 @@
                   yacc_debug:
                       Generate a parser.out file that explains how yacc
                       built the parsing table from the grammar.
      +
      +            taboutputdir:
      +                Set this parameter to control the location of generated
      +                lextab and yacctab files.
               """
               self.clex = CLexer(
                   error_func=self._lex_error_func,
      @@ -73,7 +78,8 @@
       
               self.clex.build(
                   optimize=lex_optimize,
      -            lextab=lextab)
      +            lextab=lextab,
      +            outputdir=taboutputdir)
               self.tokens = self.clex.tokens
       
               rules_with_opt = [
      @@ -85,6 +91,7 @@
                   'expression',
                   'identifier_list',
                   'init_declarator_list',
      +            'initializer_list',
                   'parameter_type_list',
                   'specifier_qualifier_list',
                   'block_item_list',
      @@ -100,7 +107,8 @@
                   start='translation_unit_or_empty',
                   debug=yacc_debug,
                   optimize=yacc_optimize,
      -            tabmodule=yacctab)
      +            tabmodule=yacctab,
      +            outputdir=taboutputdir)
       
               # Stack of scopes for keeping track of symbols. _scope_stack[-1] is
               # the current (topmost) scope. Each scope is a dictionary that
      @@ -211,13 +219,11 @@
           # The basic declaration here is 'int c', and the pointer and
           # the array are the modifiers.
           #
      -    # Basic declarations are represented by TypeDecl (from module
      -    # c_ast) and the modifiers are FuncDecl, PtrDecl and
      -    # ArrayDecl.
      +    # Basic declarations are represented by TypeDecl (from module c_ast) and the
      +    # modifiers are FuncDecl, PtrDecl and ArrayDecl.
           #
      -    # The standard states that whenever a new modifier is parsed,
      -    # it should be added to the end of the list of modifiers. For
      -    # example:
      +    # The standard states that whenever a new modifier is parsed, it should be
      +    # added to the end of the list of modifiers. For example:
           #
           # K&R2 A.8.6.2: Array Declarators
           #
      @@ -236,7 +242,6 @@
           # useful for pointers, that can come as a chain from the rule
           # p_pointer. In this case, the whole modifier list is spliced
           # into the new location.
      -    #
           def _type_modify_decl(self, decl, modifier):
               """ Tacks a type modifier on a declarator, and returns
                   the modified declarator.
      @@ -983,28 +988,52 @@
               p[0] = p[2]
       
           def p_direct_declarator_3(self, p):
      -        """ direct_declarator   : direct_declarator LBRACKET assignment_expression_opt RBRACKET
      +        """ direct_declarator   : direct_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
               """
      +        quals = (p[3] if len(p) > 5 else []) or []
      +        # Accept dimension qualifiers
      +        # Per C99 6.7.5.3 p7
               arr = c_ast.ArrayDecl(
                   type=None,
      -            dim=p[3],
      +            dim=p[4] if len(p) > 5 else p[3],
      +            dim_quals=quals,
      +            coord=p[1].coord)
      +
      +        p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
      +
      +    def p_direct_declarator_4(self, p):
      +        """ direct_declarator   : direct_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
      +                                | direct_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
      +        """
      +        # Using slice notation for PLY objects doesn't work in Python 3 for the
      +        # version of PLY embedded with pycparser; see PLY Google Code issue 30.
      +        # Work around that here by listing the two elements separately.
      +        listed_quals = [item if isinstance(item, list) else [item]
      +            for item in [p[3],p[4]]]
      +        dim_quals = [qual for sublist in listed_quals for qual in sublist
      +            if qual is not None]
      +        arr = c_ast.ArrayDecl(
      +            type=None,
      +            dim=p[5],
      +            dim_quals=dim_quals,
                   coord=p[1].coord)
       
               p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
       
           # Special for VLAs
           #
      -    def p_direct_declarator_4(self, p):
      -        """ direct_declarator   : direct_declarator LBRACKET TIMES RBRACKET
      +    def p_direct_declarator_5(self, p):
      +        """ direct_declarator   : direct_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
               """
               arr = c_ast.ArrayDecl(
                   type=None,
      -            dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
      +            dim=c_ast.ID(p[4], self._coord(p.lineno(4))),
      +            dim_quals=p[3] if p[3] != None else [],
                   coord=p[1].coord)
       
               p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
       
      -    def p_direct_declarator_5(self, p):
      +    def p_direct_declarator_6(self, p):
               """ direct_declarator   : direct_declarator LPAREN parameter_type_list RPAREN
                                       | direct_declarator LPAREN identifier_list_opt RPAREN
               """
      @@ -1037,11 +1066,30 @@
                           | TIMES type_qualifier_list_opt pointer
               """
               coord = self._coord(p.lineno(1))
      -
      -        p[0] = c_ast.PtrDecl(
      -            quals=p[2] or [],
      -            type=p[3] if len(p) > 3 else None,
      -            coord=coord)
      +        # Pointer decls nest from inside out. This is important when different
      +        # levels have different qualifiers. For example:
      +        #
      +        #  char * const * p;
      +        #
      +        # Means "pointer to const pointer to char"
      +        #
      +        # While: 
      +        #
      +        #  char ** const p;
      +        #
      +        # Means "const pointer to pointer to char"
      +        #
      +        # So when we construct PtrDecl nestings, the leftmost pointer goes in
      +        # as the most nested type.
      +        nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord)
      +        if len(p) > 3:
      +            tail_type = p[3]
      +            while tail_type.type is not None:
      +                tail_type = tail_type.type
      +            tail_type.type = nested_type
      +            p[0] = p[3]
      +        else:
      +            p[0] = nested_type
       
           def p_type_qualifier_list(self, p):
               """ type_qualifier_list : type_qualifier
      @@ -1101,6 +1149,7 @@
               #
               else:
                   decl = c_ast.Typename(
      +                name='',
                       quals=spec['qual'],
                       type=p[2] or c_ast.TypeDecl(None, None, None),
                       coord=self._coord(p.lineno(2)))
      @@ -1125,10 +1174,13 @@
               p[0] = p[1]
       
           def p_initializer_2(self, p):
      -        """ initializer : brace_open initializer_list brace_close
      +        """ initializer : brace_open initializer_list_opt brace_close
                               | brace_open initializer_list COMMA brace_close
               """
      -        p[0] = p[2]
      +        if p[2] is None:
      +            p[0] = c_ast.InitList([], self._coord(p.lineno(1)))
      +        else:
      +            p[0] = p[2]
       
           def p_initializer_list(self, p):
               """ initializer_list    : designation_opt initializer
      @@ -1172,6 +1224,7 @@
               #~ print '=========='
       
               typename = c_ast.Typename(
      +            name='',
                   quals=p[1]['qual'],
                   type=p[2] or c_ast.TypeDecl(None, None, None),
                   coord=self._coord(p.lineno(2)))
      @@ -1211,6 +1264,7 @@
               arr = c_ast.ArrayDecl(
                   type=None,
                   dim=p[3],
      +            dim_quals=[],
                   coord=p[1].coord)
       
               p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
      @@ -1221,6 +1275,7 @@
               p[0] = c_ast.ArrayDecl(
                   type=c_ast.TypeDecl(None, None, None),
                   dim=p[2],
      +            dim_quals=[],
                   coord=self._coord(p.lineno(1)))
       
           def p_direct_abstract_declarator_4(self, p):
      @@ -1229,6 +1284,7 @@
               arr = c_ast.ArrayDecl(
                   type=None,
                   dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
      +            dim_quals=[],
                   coord=p[1].coord)
       
               p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
      @@ -1239,6 +1295,7 @@
               p[0] = c_ast.ArrayDecl(
                   type=c_ast.TypeDecl(None, None, None),
                   dim=c_ast.ID(p[3], self._coord(p.lineno(3))),
      +            dim_quals=[],
                   coord=self._coord(p.lineno(1)))
       
           def p_direct_abstract_declarator_6(self, p):
      @@ -1322,7 +1379,8 @@
       
           def p_iteration_statement_4(self, p):
               """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
      -        p[0] = c_ast.For(c_ast.DeclList(p[3]), p[4], p[6], p[8], self._coord(p.lineno(1)))
      +        p[0] = c_ast.For(c_ast.DeclList(p[3], self._coord(p.lineno(1))),
      +                         p[4], p[6], p[8], self._coord(p.lineno(1)))
       
           def p_jump_statement_1(self, p):
               """ jump_statement  : GOTO ID SEMI """
      @@ -1525,6 +1583,14 @@
               """ primary_expression  : LPAREN expression RPAREN """
               p[0] = p[2]
       
      +    def p_primary_expression_5(self, p):
      +        """ primary_expression  : OFFSETOF LPAREN type_name COMMA identifier RPAREN
      +        """
      +        coord = self._coord(p.lineno(1))
      +        p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord),
      +                              c_ast.ExprList([p[3], p[5]], coord),
      +                              coord)
      +
           def p_argument_expression_list(self, p):
               """ argument_expression_list    : assignment_expression
                                               | argument_expression_list COMMA assignment_expression
      @@ -1543,6 +1609,7 @@
               """ constant    : INT_CONST_DEC
                               | INT_CONST_OCT
                               | INT_CONST_HEX
      +                        | INT_CONST_BIN
               """
               p[0] = c_ast.Constant(
                   'int', p[1], self._coord(p.lineno(1)))
      @@ -1585,7 +1652,7 @@
                   p[0] = c_ast.Constant(
                       'string', p[1], self._coord(p.lineno(1)))
      
      From pypy.commits at gmail.com  Thu Jan 28 05:32:58 2016
      From: pypy.commits at gmail.com (plan_rich)
      Date: Thu, 28 Jan 2016 02:32:58 -0800 (PST)
      Subject: [pypy-commit] pypy s390x-backend: removed size info from call
       builder when assembling call_assembler (it is not needed), 
      Message-ID: <56a9ee5a.c5321c0a.296d9.ffffaf1c@mx.google.com>
      
      Author: Richard Plangger 
      Branch: s390x-backend
      Changeset: r81992:e6d4a987b802
      Date: 2016-01-28 11:32 +0100
      http://bitbucket.org/pypy/pypy/changeset/e6d4a987b802/
      
      Log:	removed size info from call builder when assembling call_assembler
      	(it is not needed), removed several not necessary stack frame
      	allocations in the jit
      
      diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
      --- a/rpython/jit/backend/zarch/assembler.py
      +++ b/rpython/jit/backend/zarch/assembler.py
      @@ -44,7 +44,6 @@
               self.current_clt = None
               self._regalloc = None
               self.datablockwrapper = None
      -        self.subject_op = None # needed in call assembler to pass by the operation
               self.propagate_exception_path = 0
               self.stack_check_slowpath = 0
               self.loop_run_counters = []
      @@ -332,10 +331,8 @@
       
               # Do the call
               adr = rffi.cast(lltype.Signed, self.cpu.realloc_frame)
      -        mc.push_std_frame()
               mc.load_imm(mc.RAW_CALL_REG, adr)
               mc.raw_call()
      -        mc.pop_std_frame()
       
               # The result is stored back into SPP (= r31)
               mc.LGR(r.SPP, r.r2)
      @@ -595,11 +592,9 @@
               # LGHI r0, ... (4  bytes)
               #       sum -> (14 bytes)
               mc.write('\x00'*14)
      -        mc.push_std_frame()
               mc.load_imm(r.RETURN, self._frame_realloc_slowpath)
               self.load_gcmap(mc, r.r1, gcmap)
               mc.raw_call()
      -        mc.pop_std_frame()
       
               self.frame_depth_to_patch.append((patch_pos, mc.currpos()))
       
      diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
      --- a/rpython/jit/backend/zarch/opassembler.py
      +++ b/rpython/jit/backend/zarch/opassembler.py
      @@ -1034,10 +1034,8 @@
               if basesize != 0:
                   self.mc.AGHI(r.r2, l.imm(basesize))
       
      -        self.mc.push_std_frame()
               self.mc.load_imm(self.mc.RAW_CALL_REG, self.memcpy_addr)
               self.mc.raw_call()
      -        self.mc.pop_std_frame()
       
           def emit_zero_array(self, op, arglocs, regalloc):
               base_loc, startindex_loc, length_loc, \
      @@ -1090,9 +1088,7 @@
                   vloc = imm(0)
               self._store_force_index(self._find_nearby_operation(regalloc, +1))
               # 'result_loc' is either r2, f0 or None
      -        self.subject_op = op
               self.call_assembler(op, argloc, vloc, result_loc, r.r2)
      -        self.subject_op = None
               self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos()))
       
           emit_call_assembler_i = _genop_call_assembler
      @@ -1106,13 +1102,11 @@
               self.regalloc_mov(argloc, r.r2)
               self.mc.LG(r.r3, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP))
       
      -        descr = self.subject_op.getdescr()
      -        cb = callbuilder.CallBuilder(self, addr, [r.r2, r.r3], r.r2, descr)
      +        cb = callbuilder.CallBuilder(self, addr, [r.r2, r.r3], r.r2, None)
               cb.emit()
       
           def _call_assembler_emit_helper_call(self, addr, arglocs, result_loc):
      -        descr = self.subject_op.getdescr()
      -        cb = callbuilder.CallBuilder(self, addr, arglocs, result_loc, descr)
      +        cb = callbuilder.CallBuilder(self, addr, arglocs, result_loc, None)
               cb.emit()
       
           def _call_assembler_check_descr(self, value, tmploc):
      diff --git a/rpython/jit/backend/zarch/test/test_ztranslation_call_assembler.py b/rpython/jit/backend/zarch/test/test_ztranslation_call_assembler.py
      --- a/rpython/jit/backend/zarch/test/test_ztranslation_call_assembler.py
      +++ b/rpython/jit/backend/zarch/test/test_ztranslation_call_assembler.py
      @@ -1,10 +1,11 @@
       from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestCallAssembler
       from rpython.translator.translator import TranslationContext
       from rpython.config.translationoption import DEFL_GC
      -from rpython.jit.backend.zarch.arch import WORD
      -import sys
       
       class TestTranslationCallAssemblerZARCH(TranslationTestCallAssembler):
      -    def _check_cbuilder(self, cbuilder):
      -        pass
      +    def _get_TranslationContext(self):
      +        t = TranslationContext()
      +        t.config.translation.gc = DEFL_GC   # 'hybrid' or 'minimark'
      +        t.config.translation.list_comprehension_operations = True
      +        return t
       
      diff --git a/rpython/jit/backend/zarch/test/test_ztranslation_external_exception.py b/rpython/jit/backend/zarch/test/test_ztranslation_external_exception.py
      --- a/rpython/jit/backend/zarch/test/test_ztranslation_external_exception.py
      +++ b/rpython/jit/backend/zarch/test/test_ztranslation_external_exception.py
      @@ -1,19 +1,11 @@
       from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest
       from rpython.translator.translator import TranslationContext
       from rpython.config.translationoption import DEFL_GC
      -from rpython.translator.platform import platform as compiler
       
      -if compiler.name == 'msvc':
      -    _MSVC = True
      -else:
      -    _MSVC = False
      -
      -class TestTranslationRemoveTypePtrX86(TranslationRemoveTypePtrTest):
      +class TestTranslationRemoveTypePtrZARCH(TranslationRemoveTypePtrTest):
           def _get_TranslationContext(self):
               t = TranslationContext()
               t.config.translation.gc = DEFL_GC   # 'hybrid' or 'minimark'
      -        if not _MSVC:
      -            t.config.translation.gcrootfinder = 'asmgcc'
               t.config.translation.list_comprehension_operations = True
               t.config.translation.gcremovetypeptr = True
               return t
      
      From pypy.commits at gmail.com  Thu Jan 28 08:13:42 2016
      From: pypy.commits at gmail.com (plan_rich)
      Date: Thu, 28 Jan 2016 05:13:42 -0800 (PST)
      Subject: [pypy-commit] pypy s390x-backend: added SGRK instruction,
       now the stack overflow is correctly detected and the exception is
       thrown!
      Message-ID: <56aa1406.85e41c0a.25a84.ffffebfc@mx.google.com>
      
      Author: Richard Plangger 
      Branch: s390x-backend
      Changeset: r81993:394d3ce64a00
      Date: 2016-01-28 14:12 +0100
      http://bitbucket.org/pypy/pypy/changeset/394d3ce64a00/
      
      Log:	added SGRK instruction, now the stack overflow is correctly detected
      	and the exception is thrown!
      
      diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
      --- a/rpython/jit/backend/zarch/assembler.py
      +++ b/rpython/jit/backend/zarch/assembler.py
      @@ -562,20 +562,18 @@
                   assert check_imm_value(diff)
       
                   mc = self.mc
      -            mc.load_imm(r.SCRATCH, endaddr)     # li r0, endaddr
      -            mc.load(r.r14, r.SCRATCH, 0)        # lg r14, [end]
      -            mc.load(r.SCRATCH, r.SCRATCH, diff) # lg r0, [length]
      -            mc.LGR(r.SCRATCH2, r.SP)
      -            mc.SGR(r.SCRATCH2, r.r14)           # sub r1, (SP - r14)
      -            jmp_pos = self.mc.currpos()
      -            self.mc.reserve_cond_jump()
      -
      +            mc.load_imm(r.r1, endaddr)
      +            mc.load(r.r0, r.r1, 0)    # ld r0, [end]
      +            mc.load(r.r1, r.r1, diff) # ld r1, [length]
      +            mc.SGRK(r.r0, r.SP, r.r0)
      +            jmp_pos = self.mc.get_relative_pos()
      +            mc.reserve_cond_jump()
                   mc.load_imm(r.r14, self.stack_check_slowpath)
                   mc.BASR(r.r14, r.r14)
       
                   currpos = self.mc.currpos()
      -            pmc = OverwritingBuilder(mc, jmp_pos, 1)
      -            pmc.CLGRJ(r.SCRATCH2, r.SCRATCH, c.GT, l.imm(currpos - jmp_pos))
      +            pmc = OverwritingBuilder(self.mc, jmp_pos, 1)
      +            pmc.CLGRJ(r.r0, r.r1, c.LE, l.imm(currpos - jmp_pos))
                   pmc.overwrite()
       
           def _check_frame_depth(self, mc, gcmap):
      diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py
      --- a/rpython/jit/backend/zarch/instruction_builder.py
      +++ b/rpython/jit/backend/zarch/instruction_builder.py
      @@ -388,6 +388,12 @@
           byte = (r1 & BIT_MASK_4) << 4 | (r2 & BIT_MASK_4)
           self.writechar(chr(byte))
       
      +def build_rrf_a(mnemonic, (opcode1,opcode2), argtypes='r,r,r'):
      +    @builder.arguments(argtypes)
      +    def encode_rrf_a(self, r1, r2, r3):
      +        _encode_rrf(self, opcode1, opcode2, r1, r2, r3, 0)
      +    return encode_rrf_a
      +
       def build_rrf_c(mnemonic, (opcode1,opcode2), argtypes='r,r,r/m,-'):
           @builder.arguments(argtypes)
           def encode_rrf_b(self, r1, r2, rm3, rm4):
      diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py
      --- a/rpython/jit/backend/zarch/instructions.py
      +++ b/rpython/jit/backend/zarch/instructions.py
      @@ -21,6 +21,7 @@
           'SR':      ('rr',    ['\x1B']),
           'SG':      ('rxy',   ['\xE3','\x09']),
           'SGR':     ('rre',   ['\xB9','\x09']),
      +    'SGRK':    ('rrf_a',   ['\xB9','\xE9']),
           # mul
           'MSGR':    ('rre',   ['\xB9','\x0C']),
           'MSG':     ('rxy',   ['\xE3','\x0C']),
      @@ -287,3 +288,9 @@
       all_mnemonic_codes.update(memory_mnemonic_codes)
       all_mnemonic_codes.update(floatingpoint_mnemonic_codes)
       all_mnemonic_codes.update(branch_mnemonic_codes)
      +
      +
      +if __name__ == "__main__":
      +    print("%d instructions:" % len(all_mnemonic_codes))
      +    for name, (typeinstr, _) in all_mnemonic_codes.items():
      +        print(" %s\t(type: %s)" % (name, typeinstr))
      
      From pypy.commits at gmail.com  Thu Jan 28 08:22:30 2016
      From: pypy.commits at gmail.com (plan_rich)
      Date: Thu, 28 Jan 2016 05:22:30 -0800 (PST)
      Subject: [pypy-commit] pypy s390x-backend: extended the stack limit of s390x
      Message-ID: <56aa1616.482e1c0a.a752.ffffeff7@mx.google.com>
      
      Author: Richard Plangger 
      Branch: s390x-backend
      Changeset: r81994:eaa7b84cc9cd
      Date: 2016-01-28 14:21 +0100
      http://bitbucket.org/pypy/pypy/changeset/eaa7b84cc9cd/
      
      Log:	extended the stack limit of s390x
      
      diff --git a/rpython/translator/c/src/stack.h b/rpython/translator/c/src/stack.h
      --- a/rpython/translator/c/src/stack.h
      +++ b/rpython/translator/c/src/stack.h
      @@ -11,6 +11,9 @@
             * value of 768 kb is only enough for 406 levels on ppc64, and 792
             * on ppc64le */
       #    define MAX_STACK_SIZE (11 << 18)    /* 2.8 mb */
      +#  elif defined(__s390x__)
      +     /* S390X as well has pretty large stack frames. */
      +#    define MAX_STACK_SIZE (11 << 18)    /* 2.8 mb */
       #  else
       #    define MAX_STACK_SIZE (3 << 18)    /* 768 kb */
       #  endif
      
      From pypy.commits at gmail.com  Thu Jan 28 14:20:41 2016
      From: pypy.commits at gmail.com (amauryfa)
      Date: Thu, 28 Jan 2016 11:20:41 -0800 (PST)
      Subject: [pypy-commit] pypy py3.3: Add pickle support to
       itertools.combinations
      Message-ID: <56aa6a09.ca56c20a.9a2fd.ffff9683@mx.google.com>
      
      Author: Amaury Forgeot d'Arc 
      Branch: py3.3
      Changeset: r81996:e4a76a0698fc
      Date: 2016-01-27 09:35 +0100
      http://bitbucket.org/pypy/pypy/changeset/e4a76a0698fc/
      
      Log:	Add pickle support to itertools.combinations (and probably
      	combinations_with_replacement as well)
      
      diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
      --- a/pypy/module/itertools/interp_itertools.py
      +++ b/pypy/module/itertools/interp_itertools.py
      @@ -1088,6 +1088,42 @@
               self.last_result_w = result_w
               return space.newtuple(result_w)
       
      +    def descr_reduce(self, space):
      +        if self.stopped:
      +            pool_w = []
      +        else:
      +            pool_w = self.pool_w
      +        result_w = [
      +            space.type(self),
      +            space.newtuple([
      +                space.newtuple(pool_w), space.wrap(self.r)
      +            ])]
      +        if self.last_result_w is not None and not self.stopped:
      +            # we must pickle the indices and use them for setstate
      +            result_w = result_w + [
      +                space.newtuple([
      +                    space.wrap(index) for index in self.indices])]
      +        return space.newtuple(result_w)
      +
      +    def descr_setstate(self, space, w_state):
      +        indices_w = space.fixedview(w_state)
      +        if len(indices_w) != self.r:
      +            import pdb;pdb.set_trace()
      +            raise OperationError(space.w_ValueError, space.wrap(
      +                "invalid arguments"))
      +        for i in range(self.r):
      +            index = space.int_w(indices_w[i])
      +            max = self.get_maximum(i)
      +            # clamp the index (beware of negative max)
      +            if index > max:
      +                index = max
      +            if index < 0:
      +                index = 0
      +            self.indices.append(index)
      +        self.last_result_w = [
      +            self.pool_w[self.indices[i]]
      +            for i in range(self.r)]
      +
       @unwrap_spec(r=int)
       def W_Combinations__new__(space, w_subtype, w_iterable, r):
           pool_w = space.fixedview(w_iterable)
      @@ -1095,7 +1131,7 @@
               raise OperationError(space.w_ValueError,
                   space.wrap("r must be non-negative")
               )
      -    indices = range(len(pool_w))
      +    indices = range(r)
           res = space.allocate_instance(W_Combinations, w_subtype)
           res.__init__(space, pool_w, indices, r)
           return space.wrap(res)
      @@ -1104,6 +1140,8 @@
           __new__ = interp2app(W_Combinations__new__),
           __iter__ = interp2app(W_Combinations.descr__iter__),
           __next__ = interp2app(W_Combinations.descr_next),
      +    __reduce__ = interp2app(W_Combinations.descr_reduce),
      +    __setstate__ = interp2app(W_Combinations.descr_setstate),
           __doc__ = """\
       combinations(iterable, r) --> combinations object
       
      diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
      --- a/pypy/module/itertools/test/test_itertools.py
      +++ b/pypy/module/itertools/test/test_itertools.py
      @@ -975,6 +975,25 @@
               islice = itertools.islice(myiter, 5, 8)
               raises(StopIteration, islice.__next__)
       
      +    def test_combinations_pickle(self):
      +        from itertools import combinations
      +        import pickle
      +        for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))):
      +            assert list(op(combinations('abc', 32))) == []     # r > n
      +            assert list(op(combinations('ABCD', 2))) == [
      +                ('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]
      +            testIntermediate = combinations('ABCD', 2)
      +            next(testIntermediate)
      +            assert list(op(testIntermediate)) == [
      +                ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]
      +
      +            assert list(op(combinations(range(4), 3))) == [
      +                (0,1,2), (0,1,3), (0,2,3), (1,2,3)]
      +            testIntermediate = combinations(range(4), 3)
      +            next(testIntermediate)
      +            assert list(op(testIntermediate)) == [
      +                (0,1,3), (0,2,3), (1,2,3)]
      +
       
       class AppTestItertools32:
           spaceconfig = dict(usemodules=['itertools'])
      
      From pypy.commits at gmail.com  Thu Jan 28 14:20:39 2016
      From: pypy.commits at gmail.com (amauryfa)
      Date: Thu, 28 Jan 2016 11:20:39 -0800 (PST)
      Subject: [pypy-commit] pypy py3.3: Fix rposix module after bad merge
      Message-ID: <56aa6a07.022f1c0a.276b3.7adc@mx.google.com>
      
      Author: Amaury Forgeot d'Arc 
      Branch: py3.3
      Changeset: r81995:a9fccbdef513
      Date: 2016-01-28 17:44 +0100
      http://bitbucket.org/pypy/pypy/changeset/a9fccbdef513/
      
      Log:	Fix rposix module after bad merge
      
      diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
      --- a/rpython/rlib/rposix.py
      +++ b/rpython/rlib/rposix.py
      @@ -27,6 +27,22 @@
           from rpython.rlib import rwin32
           from rpython.rlib.rwin32file import make_win32_traits
       
      +class CConfig:
      +    _compilation_info_ = ExternalCompilationInfo(
      +        includes=['sys/stat.h',
      +                  'unistd.h',
      +                  'fcntl.h'],
      +    )
      +    for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir
      +                    fpathconf fstat fstatat fstatvfs ftruncate futimens futimes
      +                    futimesat linkat lchflags lchmod lchown lstat lutimes
      +                    mkdirat mkfifoat mknodat openat readlinkat renameat
      +                    symlinkat unlinkat utimensat""".split():
      +        locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name)
      +cConfig = rffi_platform.configure(CConfig)
      +globals().update(cConfig)
      +
      +
       class CConstantErrno(CConstant):
           # these accessors are used when calling get_errno() or set_errno()
           # on top of CPython
      @@ -1053,6 +1069,13 @@
               if not win32traits.MoveFile(path1, path2):
                   raise rwin32.lastSavedWindowsError()
       
      + at specialize.argtype(0, 1)
      +def replace(path1, path2):
      +    if os.name == 'nt':
      +        raise NotImplementedError(
      +            'On windows, os.replace() should overwrite the destination')
      +    return rename(path1, path2)
      +
       #___________________________________________________________________
       
       c_mkfifo = external('mkfifo', [rffi.CCHARP, rffi.MODE_T], rffi.INT,
      diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py
      --- a/rpython/rlib/rtime.py
      +++ b/rpython/rlib/rtime.py
      @@ -9,7 +9,6 @@
       from rpython.rtyper.tool import rffi_platform
       from rpython.rtyper.lltypesystem import rffi, lltype
       from rpython.rlib.objectmodel import register_replacement_for
      -from rpython.rlib import jit
       from rpython.rlib.rarithmetic import intmask, UINT_MAX
       from rpython.rlib import rposix
       
      @@ -168,28 +167,30 @@
           c_clock_gettime = external('clock_gettime',
                                      [lltype.Signed, lltype.Ptr(TIMESPEC)],
                                      rffi.INT, releasegil=False)
      -else:
      +if need_rusage:
           RUSAGE = RUSAGE
           RUSAGE_SELF = RUSAGE_SELF or 0
           c_getrusage = external('getrusage',
                                  [rffi.INT, lltype.Ptr(RUSAGE)],
      -                           lltype.Void,
      +                           rffi.INT,
                                  releasegil=False)
       
      +def win_perf_counter():
      +    a = lltype.malloc(A, flavor='raw')
      +    if state.divisor == 0.0:
      +        QueryPerformanceCounter(a)
      +        state.counter_start = a[0]
      +        QueryPerformanceFrequency(a)
      +        state.divisor = float(a[0])
      +    QueryPerformanceCounter(a)
      +    diff = a[0] - state.counter_start
      +    lltype.free(a, flavor='raw')
      +    return float(diff) / state.divisor
      +
       @replace_time_function('clock')
      - at jit.dont_look_inside  # the JIT doesn't like FixedSizeArray
       def clock():
           if _WIN32:
      -        a = lltype.malloc(A, flavor='raw')
      -        if state.divisor == 0.0:
      -            QueryPerformanceCounter(a)
      -            state.counter_start = a[0]
      -            QueryPerformanceFrequency(a)
      -            state.divisor = float(a[0])
      -        QueryPerformanceCounter(a)
      -        diff = a[0] - state.counter_start
      -        lltype.free(a, flavor='raw')
      -        return float(diff) / state.divisor
      +        return win_perf_counter()
           elif CLOCK_PROCESS_CPUTIME_ID is not None:
               with lltype.scoped_alloc(TIMESPEC) as a:
                   c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a)
      
      From pypy.commits at gmail.com  Thu Jan 28 14:20:43 2016
      From: pypy.commits at gmail.com (amauryfa)
      Date: Thu, 28 Jan 2016 11:20:43 -0800 (PST)
      Subject: [pypy-commit] pypy py3.3: Add pickle support to itertools.cycle
      Message-ID: <56aa6a0b.460f1c0a.55306.7c35@mx.google.com>
      
      Author: Amaury Forgeot d'Arc 
      Branch: py3.3
      Changeset: r81997:cc69ca5d0d62
      Date: 2016-01-28 17:28 +0100
      http://bitbucket.org/pypy/pypy/changeset/cc69ca5d0d62/
      
      Log:	Add pickle support to itertools.cycle
      
      diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
      --- a/pypy/module/itertools/interp_itertools.py
      +++ b/pypy/module/itertools/interp_itertools.py
      @@ -575,6 +575,24 @@
                       self.saved_w.append(w_obj)
               return w_obj
       
      +    def descr_reduce(self, space):
      +        return space.newtuple([
      +            space.type(self),
      +            space.newtuple([self.w_iterable]),
      +            space.newtuple([
      +                space.newlist(self.saved_w),
      +                space.wrap(self.index),
      +                space.wrap(self.exhausted),
      +            ]),
      +        ])
      +
      +    def descr_setstate(self, space, w_state):
      +        w_saved, w_index, w_exhausted = space.unpackiterable(w_state, 3)
      +        self.saved_w = space.unpackiterable(w_saved)
      +        self.index = space.int_w(w_index)
      +        self.exhausted = space.bool_w(w_exhausted)
      +
      +
       def W_Cycle___new__(space, w_subtype, w_iterable):
           r = space.allocate_instance(W_Cycle, w_subtype)
           r.__init__(space, w_iterable)
      @@ -585,6 +603,8 @@
               __new__  = interp2app(W_Cycle___new__),
               __iter__ = interp2app(W_Cycle.iter_w),
               __next__ = interp2app(W_Cycle.next_w),
      +        __reduce__ = interp2app(W_Cycle.descr_reduce),
      +        __setstate__ = interp2app(W_Cycle.descr_setstate),
               __doc__  = """Make an iterator returning elements from the iterable and
           saving a copy of each. When the iterable is exhausted, return
           elements from the saved copy. Repeats indefinitely.
      @@ -895,6 +915,12 @@
                   if self.space.is_true(w_next_selector):
                       return w_next_item
       
      +    def descr_reduce(self, space):
      +        return space.newtuple([
      +            space.type(self),
      +            space.newtuple([self.w_data, self.w_selectors])
      +        ])
      +
       
       def W_Compress__new__(space, w_subtype, w_data, w_selectors):
           r = space.allocate_instance(W_Compress, w_subtype)
      @@ -906,6 +932,7 @@
           __new__ = interp2app(W_Compress__new__),
           __iter__ = interp2app(W_Compress.iter_w),
           __next__ = interp2app(W_Compress.next_w),
      +    __reduce__ = interp2app(W_Compress.descr_reduce),
           __doc__ = """Make an iterator that filters elements from *data* returning
          only those that have a corresponding element in *selectors* that evaluates to
          ``True``.  Stops when either the *data* or *selectors* iterables has been
      @@ -1108,7 +1135,6 @@
           def descr_setstate(self, space, w_state):
               indices_w = space.fixedview(w_state)
               if len(indices_w) != self.r:
      -            import pdb;pdb.set_trace()
                   raise OperationError(space.w_ValueError, space.wrap(
                       "invalid arguments"))
               for i in range(self.r):
      diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
      --- a/pypy/module/itertools/test/test_itertools.py
      +++ b/pypy/module/itertools/test/test_itertools.py
      @@ -994,6 +994,13 @@
                   assert list(op(testIntermediate)) == [
                       (0,1,3), (0,2,3), (1,2,3)]
       
      +    def test_cycle_pickle(self):
      +        import itertools, pickle
      +        c = itertools.cycle('abc')
      +        next(c)
      +        assert list(itertools.islice(
      +            pickle.loads(pickle.dumps(c)), 10)) == list('bcabcabcab')
      +
       
       class AppTestItertools32:
           spaceconfig = dict(usemodules=['itertools'])
      
      From pypy.commits at gmail.com  Thu Jan 28 15:36:24 2016
      From: pypy.commits at gmail.com (plan_rich)
      Date: Thu, 28 Jan 2016 12:36:24 -0800 (PST)
      Subject: [pypy-commit] pypy s390x-backend: greater equal is not equal,
       this basically invoked realloc frame EVERY time the assembler
       location was visited
      Message-ID: <56aa7bc8.85b01c0a.b3ccf.ffff959a@mx.google.com>
      
      Author: Richard Plangger 
      Branch: s390x-backend
      Changeset: r81998:2f0e451a18e2
      Date: 2016-01-28 21:35 +0100
      http://bitbucket.org/pypy/pypy/changeset/2f0e451a18e2/
      
      Log:	greater equal is not equal, this basically invoked realloc frame
      	EVERY time the assembler location was visited
      
      diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
      --- a/rpython/jit/backend/zarch/assembler.py
      +++ b/rpython/jit/backend/zarch/assembler.py
      @@ -582,7 +582,7 @@
               """
               descrs = self.cpu.gc_ll_descr.getframedescrs(self.cpu)
               ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr)
      -        mc.LG(r.SCRATCH2, l.addr(ofs, r.SPP))
      +        mc.LG(r.r1, l.addr(ofs, r.SPP))
               patch_pos = mc.currpos()
               # placeholder for the following instructions
               # CGFI r1, ... (6  bytes)
      @@ -602,8 +602,8 @@
               for traps_pos, jmp_target in self.frame_depth_to_patch:
                   pmc = OverwritingBuilder(self.mc, traps_pos, 3)
                   # three traps, so exactly three instructions to patch here
      -            pmc.CGFI(r.SCRATCH2, l.imm(frame_depth))
      -            pmc.BRC(c.EQ, l.imm(jmp_target - (traps_pos + 6)))
      +            pmc.CGFI(r.r1, l.imm(frame_depth))
      +            pmc.BRC(c.GE, l.imm(jmp_target - (traps_pos + 6)))
                   pmc.LGHI(r.r0, l.imm(frame_depth))
                   pmc.overwrite()
       
      
      From pypy.commits at gmail.com  Thu Jan 28 16:09:18 2016
      From: pypy.commits at gmail.com (plan_rich)
      Date: Thu, 28 Jan 2016 13:09:18 -0800 (PST)
      Subject: [pypy-commit] pypy s390x-backend: adapted test to match the
       expected assembly after the changes
      Message-ID: <56aa837e.84e31c0a.6c49c.ffff9be0@mx.google.com>
      
      Author: Richard Plangger 
      Branch: s390x-backend
      Changeset: r81999:e13400c9ff8a
      Date: 2016-01-28 22:08 +0100
      http://bitbucket.org/pypy/pypy/changeset/e13400c9ff8a/
      
      Log:	adapted test to match the expected assembly after the changes
      
      diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py
      --- a/rpython/jit/backend/zarch/test/test_runner.py
      +++ b/rpython/jit/backend/zarch/test/test_runner.py
      @@ -26,5 +26,5 @@
       
           add_loop_instructions = "lg; lgr; larl; agr; cgfi; je; j;$"
           # realloc frame takes the most space (from just after larl, to lay)
      -    bridge_loop_instructions = "larl; lg; cgfi; je; lghi; stg; " \
      -                               "lay; lgfi;( iihf;)? lgfi;( iihf;)? basr; lay; lg; br;$"
      +    bridge_loop_instructions = "larl; lg; cgfi; jhe; lghi; " \
      +                               "lgfi;( iihf;)? lgfi;( iihf;)? basr; lg; br;$"
      
      From pypy.commits at gmail.com  Thu Jan 28 16:57:44 2016
      From: pypy.commits at gmail.com (mattip)
      Date: Thu, 28 Jan 2016 13:57:44 -0800 (PST)
      Subject: [pypy-commit] pypy cpyext-ext: merge default into branch
      Message-ID: <56aa8ed8.41dfc20a.e49d.ffffc387@mx.google.com>
      
      Author: mattip 
      Branch: cpyext-ext
      Changeset: r82000:da987f28cbfb
      Date: 2016-01-28 23:56 +0200
      http://bitbucket.org/pypy/pypy/changeset/da987f28cbfb/
      
      Log:	merge default into branch
      
      diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
      --- a/pypy/doc/faq.rst
      +++ b/pypy/doc/faq.rst
      @@ -54,7 +54,8 @@
       It is quite common nowadays that xyz is available on PyPI_ and
       installable with ``pip install xyz``.  The simplest solution is to `use
       virtualenv (as documented here)`_.  Then enter (activate) the virtualenv
      -and type: ``pip install xyz``.
      +and type: ``pip install xyz``.  If you don't know or don't want virtualenv,
      +you can also install ``pip`` globally by saying ``pypy -m ensurepip``.
       
       If you get errors from the C compiler, the module is a CPython C
       Extension module using unsupported features.  `See below.`_
      diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
      --- a/pypy/doc/whatsnew-head.rst
      +++ b/pypy/doc/whatsnew-head.rst
      @@ -133,3 +133,9 @@
       `rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen
       for traces containing a large number of pure getfield operations.
       
      +.. branch: exctrans
      +
      +Try to ensure that no new functions get annotated during the 'source_c' phase.
      +Refactor sandboxing to operate at a higher level.
      +
      +.. branch: cpyext-bootstrap
      diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
      --- a/pypy/module/cpyext/api.py
      +++ b/pypy/module/cpyext/api.py
      @@ -441,8 +441,8 @@
       TYPES = {}
       GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur
           '_Py_NoneStruct#': ('PyObject*', 'space.w_None'),
      -    '_Py_TrueStruct#': ('PyObject*', 'space.w_True'),
      -    '_Py_ZeroStruct#': ('PyObject*', 'space.w_False'),
      +    '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'),
      +    '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'),
           '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'),
           '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'),
           'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'),
      @@ -505,7 +505,9 @@
       def get_structtype_for_ctype(ctype):
           from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr
           from pypy.module.cpyext.cdatetime import PyDateTime_CAPI
      +    from pypy.module.cpyext.intobject import PyIntObject
           return {"PyObject*": PyObject, "PyTypeObject*": PyTypeObjectPtr,
      +            "PyIntObject*": PyIntObject,
                   "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype]
       
       PyTypeObject = lltype.ForwardReference()
      @@ -829,6 +831,7 @@
           space.fromcache(State).install_dll(eci)
       
           # populate static data
      +    builder = StaticObjectBuilder(space)
           for name, (typ, expr) in GLOBALS.iteritems():
               from pypy.module import cpyext
               w_obj = eval(expr)
      @@ -853,7 +856,7 @@
                       assert False, "Unknown static pointer: %s %s" % (typ, name)
                   ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value),
                                           ctypes.c_void_p).value
      -        elif typ in ('PyObject*', 'PyTypeObject*'):
      +        elif typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'):
                   if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'):
                       # we already have the pointer
                       in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name)
      @@ -862,17 +865,10 @@
                       # we have a structure, get its address
                       in_dll = ll2ctypes.get_ctypes_type(PyObject.TO).in_dll(bridge, name)
                       py_obj = ll2ctypes.ctypes2lltype(PyObject, ctypes.pointer(in_dll))
      -            from pypy.module.cpyext.pyobject import (
      -                track_reference, get_typedescr)
      -            w_type = space.type(w_obj)
      -            typedescr = get_typedescr(w_type.instancetypedef)
      -            py_obj.c_ob_refcnt = 1
      -            py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr,
      -                                         make_ref(space, w_type))
      -            typedescr.attach(space, py_obj, w_obj)
      -            track_reference(space, py_obj, w_obj)
      +            builder.prepare(py_obj, w_obj)
               else:
                   assert False, "Unknown static object: %s %s" % (typ, name)
      +    builder.attach_all()
       
           pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI')
       
      @@ -889,6 +885,36 @@
           setup_init_functions(eci, translating=False)
           return modulename.new(ext='')
       
      +
      +class StaticObjectBuilder:
      +    def __init__(self, space):
      +        self.space = space
      +        self.to_attach = []
      +
      +    def prepare(self, py_obj, w_obj):
      +        from pypy.module.cpyext.pyobject import track_reference
      +        py_obj.c_ob_refcnt = 1
      +        track_reference(self.space, py_obj, w_obj)
      +        self.to_attach.append((py_obj, w_obj))
      +
      +    def attach_all(self):
      +        from pypy.module.cpyext.pyobject import get_typedescr, make_ref
      +        from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2
      +        space = self.space
      +        space._cpyext_type_init = []
      +        for py_obj, w_obj in self.to_attach:
      +            w_type = space.type(w_obj)
      +            typedescr = get_typedescr(w_type.instancetypedef)
      +            py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr,
      +                                         make_ref(space, w_type))
      +            typedescr.attach(space, py_obj, w_obj)
      +        cpyext_type_init = space._cpyext_type_init
      +        del space._cpyext_type_init
      +        for pto, w_type in cpyext_type_init:
      +            finish_type_1(space, pto)
      +            finish_type_2(space, pto, w_type)
      +
      +
       def mangle_name(prefix, name):
           if name.startswith('Py'):
               return prefix + name[2:]
      @@ -1068,6 +1094,7 @@
           use_micronumpy = space.config.objspace.usemodules.micronumpy
           if not use_micronumpy:
               return use_micronumpy
      +    # import to register api functions by side-effect
           import pypy.module.cpyext.ndarrayobject 
           global GLOBALS, SYMBOLS_C, separate_module_files
           GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)")
      @@ -1096,14 +1123,33 @@
           run_bootstrap_functions(space)
           setup_va_functions(eci)
       
      +    from pypy.module import cpyext   # for eval() below
      +
      +    # Set up the types.  Needs a special case, because of the
      +    # immediate cycle involving 'c_ob_type', and because we don't
      +    # want these types to be Py_TPFLAGS_HEAPTYPE.
      +    static_types = {}
      +    for name, (typ, expr) in GLOBALS.items():
      +        if typ == 'PyTypeObject*':
      +            pto = lltype.malloc(PyTypeObject, immortal=True,
      +                                zero=True, flavor='raw')
      +            pto.c_ob_refcnt = 1
      +            pto.c_tp_basicsize = -1
      +            static_types[name] = pto
      +    builder = StaticObjectBuilder(space)
      +    for name, pto in static_types.items():
      +        pto.c_ob_type = static_types['PyType_Type#']
      +        w_type = eval(GLOBALS[name][1])
      +        builder.prepare(rffi.cast(PyObject, pto), w_type)
      +    builder.attach_all()
      +
           # populate static data
           for name, (typ, expr) in GLOBALS.iteritems():
               name = name.replace("#", "")
               if name.startswith('PyExc_'):
                   name = '_' + name
      -        from pypy.module import cpyext
               w_obj = eval(expr)
      -        if typ in ('PyObject*', 'PyTypeObject*'):
      +        if typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'):
                   struct_ptr = make_ref(space, w_obj)
               elif typ == 'PyDateTime_CAPI*':
                   continue
      diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
      --- a/pypy/module/cpyext/test/test_typeobject.py
      +++ b/pypy/module/cpyext/test/test_typeobject.py
      @@ -392,6 +392,11 @@
               module = self.import_extension('foo', [
                   ("test_type", "METH_O",
                    '''
      +                 /* "args->ob_type" is a strange way to get at 'type',
      +                    which should have a different tp_getattro/tp_setattro
      +                    than its tp_base, which is 'object'.
      +                  */
      +                  
                        if (!args->ob_type->tp_setattro)
                        {
                            PyErr_SetString(PyExc_ValueError, "missing tp_setattro");
      @@ -400,8 +405,12 @@
                        if (args->ob_type->tp_setattro ==
                            args->ob_type->tp_base->tp_setattro)
                        {
      -                     PyErr_SetString(PyExc_ValueError, "recursive tp_setattro");
      -                     return NULL;
      +                     /* Note that unlike CPython, in PyPy 'type.tp_setattro'
      +                        is the same function as 'object.tp_setattro'.  This
      +                        test used to check that it was not, but that was an
      +                        artifact of the bootstrap logic only---in the final
      +                        C sources I checked and they are indeed the same.
      +                        So we ignore this problem here. */
                        }
                        if (!args->ob_type->tp_getattro)
                        {
      diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
      --- a/pypy/module/cpyext/typeobject.py
      +++ b/pypy/module/cpyext/typeobject.py
      @@ -229,7 +229,7 @@
                   assert len(slot_names) == 2
                   struct = getattr(pto, slot_names[0])
                   if not struct:
      -                assert not space.config.translating
      +                #assert not space.config.translating
                       assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE
                       if slot_names[0] == 'c_tp_as_number':
                           STRUCT_TYPE = PyNumberMethods
      @@ -393,55 +393,6 @@
                          realize=type_realize,
                          dealloc=type_dealloc)
       
      -    # some types are difficult to create because of cycles.
      -    # - object.ob_type = type
      -    # - type.ob_type   = type
      -    # - tuple.ob_type  = type
      -    # - type.tp_base   = object
      -    # - tuple.tp_base  = object
      -    # - type.tp_bases is a tuple
      -    # - object.tp_bases is a tuple
      -    # - tuple.tp_bases is a tuple
      -
      -    # insert null placeholders to please create_ref()
      -    track_reference(space, lltype.nullptr(PyObject.TO), space.w_type)
      -    track_reference(space, lltype.nullptr(PyObject.TO), space.w_object)
      -    track_reference(space, lltype.nullptr(PyObject.TO), space.w_tuple)
      -    track_reference(space, lltype.nullptr(PyObject.TO), space.w_str)
      -
      -    # create the objects
      -    py_type = create_ref(space, space.w_type)
      -    py_object = create_ref(space, space.w_object)
      -    py_tuple = create_ref(space, space.w_tuple)
      -    py_str = create_ref(space, space.w_str)
      -    # XXX py_str is not initialized here correctly, because we are
      -    #     not tracking it, it gets an empty c_ob_type from py_basestring
      -
      -    # form cycles
      -    pto_type = rffi.cast(PyTypeObjectPtr, py_type)
      -    py_type.c_ob_type = pto_type
      -    py_object.c_ob_type = pto_type
      -    py_tuple.c_ob_type = pto_type
      -
      -    pto_object = rffi.cast(PyTypeObjectPtr, py_object)
      -    pto_type.c_tp_base = pto_object
      -    pto_tuple = rffi.cast(PyTypeObjectPtr, py_tuple)
      -    pto_tuple.c_tp_base = pto_object
      -
      -    pto_type.c_tp_bases.c_ob_type = pto_tuple
      -    pto_object.c_tp_bases.c_ob_type = pto_tuple
      -    pto_tuple.c_tp_bases.c_ob_type = pto_tuple
      -
      -    for typ in (py_type, py_object, py_tuple, py_str):
      -        heaptype = rffi.cast(PyHeapTypeObject, typ)
      -        heaptype.c_ht_name.c_ob_type = pto_type
      -
      -    # Restore the mapping
      -    track_reference(space, py_type, space.w_type, replace=True)
      -    track_reference(space, py_object, space.w_object, replace=True)
      -    track_reference(space, py_tuple, space.w_tuple, replace=True)
      -    track_reference(space, py_str, space.w_str, replace=True)
      -
       @cpython_api([PyObject], lltype.Void, external=False)
       def subtype_dealloc(space, obj):
           pto = obj.c_ob_type
      @@ -558,6 +509,8 @@
           pto.c_tp_as_sequence = heaptype.c_as_sequence
           pto.c_tp_as_mapping = heaptype.c_as_mapping
           pto.c_tp_as_buffer = heaptype.c_as_buffer
      +    pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out
      +    pto.c_tp_itemsize = 0
       
           return rffi.cast(PyObject, heaptype)
       
      @@ -593,8 +546,6 @@
               pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name)
           else:
               pto.c_tp_name = rffi.str2charp(w_type.name)
      -    pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out
      -    pto.c_tp_itemsize = 0
           # uninitialized fields:
           # c_tp_print
           # XXX implement
      @@ -602,8 +553,11 @@
           w_base = best_base(space, w_type.bases_w)
           pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base))
       
      -    finish_type_1(space, pto)
      -    finish_type_2(space, pto, w_type)
      +    if hasattr(space, '_cpyext_type_init'):
      +        space._cpyext_type_init.append((pto, w_type))
      +    else:
      +        finish_type_1(space, pto)
      +        finish_type_2(space, pto, w_type)
       
           pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct)
           if pto.c_tp_base:
      diff --git a/rpython/memory/gctransform/boehm.py b/rpython/memory/gctransform/boehm.py
      --- a/rpython/memory/gctransform/boehm.py
      +++ b/rpython/memory/gctransform/boehm.py
      @@ -74,7 +74,7 @@
       
           def gct_fv_gc_malloc_varsize(self, hop, flags, TYPE, v_length, c_const_size, c_item_size,
                                                                          c_offset_to_length):
      -        # XXX same behavior for zero=True: in theory that's wrong        
      +        # XXX same behavior for zero=True: in theory that's wrong
               if c_offset_to_length is None:
                   v_raw = hop.genop("direct_call",
                                      [self.malloc_varsize_no_length_ptr, v_length,
      @@ -156,6 +156,11 @@
                                 resulttype = lltype.Signed)
               hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result)
       
      +    def gcheader_initdata(self, defnode):
      +        hdr = lltype.malloc(self.HDR, immortal=True)
      +        hdr.hash = lltype.identityhash_nocache(defnode.obj._as_ptr())
      +        return hdr._obj
      +
       
       ########## weakrefs ##########
       # Boehm: weakref objects are small structures containing only a Boehm
      diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
      --- a/rpython/memory/gctransform/framework.py
      +++ b/rpython/memory/gctransform/framework.py
      @@ -288,7 +288,6 @@
       
               s_gcref = SomePtr(llmemory.GCREF)
               gcdata = self.gcdata
      -        translator = self.translator
               #use the GC flag to find which malloc method to use
               #malloc_zero_filled == Ture -> malloc_fixedsize/varsize_clear
               #malloc_zero_filled == Flase -> malloc_fixedsize/varsize
      @@ -322,7 +321,7 @@
                           GCClass.malloc_varsize.im_func,
                           [s_gc, s_typeid16]
                           + [annmodel.SomeInteger(nonneg=True) for i in range(4)], s_gcref)
      -        
      +
               self.collect_ptr = getfn(GCClass.collect.im_func,
                   [s_gc, annmodel.SomeInteger()], annmodel.s_None)
               self.can_move_ptr = getfn(GCClass.can_move.im_func,
      @@ -1385,7 +1384,7 @@
                                       [v] + previous_steps + [c_name, c_null])
                           else:
                               llops.genop('bare_setfield', [v, c_name, c_null])
      -         
      +
                   return
               elif isinstance(TYPE, lltype.Array):
                   ITEM = TYPE.OF
      @@ -1412,6 +1411,25 @@
                                   resulttype=llmemory.Address)
               llops.genop('raw_memclear', [v_adr, v_totalsize])
       
      +    def gcheader_initdata(self, defnode):
      +        o = lltype.top_container(defnode.obj)
      +        needs_hash = self.get_prebuilt_hash(o) is not None
      +        hdr = self.gc_header_for(o, needs_hash)
      +        return hdr._obj
      +
      +    def get_prebuilt_hash(self, obj):
      +        # for prebuilt objects that need to have their hash stored and
      +        # restored.  Note that only structures that are StructNodes all
      +        # the way have their hash stored (and not e.g. structs with var-
      +        # sized arrays at the end).  'obj' must be the top_container.
      +        TYPE = lltype.typeOf(obj)
      +        if not isinstance(TYPE, lltype.GcStruct):
      +            return None
      +        if TYPE._is_varsize():
      +            return None
      +        return getattr(obj, '_hash_cache_', None)
      +
      +
       
       class TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder):
       
      diff --git a/rpython/memory/gctransform/refcounting.py b/rpython/memory/gctransform/refcounting.py
      --- a/rpython/memory/gctransform/refcounting.py
      +++ b/rpython/memory/gctransform/refcounting.py
      @@ -285,3 +285,7 @@
                                 resulttype=llmemory.Address)
               hop.genop("direct_call", [self.identityhash_ptr, v_adr],
                         resultvar=hop.spaceop.result)
      +
      +    def gcheader_initdata(self, defnode):
      +        top = lltype.top_container(defnode.obj)
      +        return self.gcheaderbuilder.header_of_object(top)._obj
      diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py
      --- a/rpython/memory/gctransform/test/test_framework.py
      +++ b/rpython/memory/gctransform/test/test_framework.py
      @@ -40,7 +40,7 @@
           t.config.translation.gc = "minimark"
           cbuild = CStandaloneBuilder(t, entrypoint, t.config,
                                       gcpolicy=FrameworkGcPolicy2)
      -    db = cbuild.generate_graphs_for_llinterp()
      +    db = cbuild.build_database()
           entrypointptr = cbuild.getentrypointptr()
           entrygraph = entrypointptr._obj.graph
       
      @@ -69,7 +69,7 @@
               return -x
           t = rtype(g, [int])
           gg = graphof(t, g)
      -    assert not CollectAnalyzer(t).analyze_direct_call(gg)    
      +    assert not CollectAnalyzer(t).analyze_direct_call(gg)
       
       def test_cancollect_external():
           fext1 = rffi.llexternal('fext1', [], lltype.Void, releasegil=False)
      @@ -110,12 +110,12 @@
       
           def entrypoint(argv):
               return g() + 2
      -    
      +
           t = rtype(entrypoint, [s_list_of_strings])
           t.config.translation.gc = "minimark"
           cbuild = CStandaloneBuilder(t, entrypoint, t.config,
                                       gcpolicy=FrameworkGcPolicy2)
      -    db = cbuild.generate_graphs_for_llinterp()
      +    db = cbuild.build_database()
       
       def test_no_collect_detection():
           from rpython.rlib import rgc
      @@ -134,12 +134,13 @@
       
           def entrypoint(argv):
               return g() + 2
      -    
      +
           t = rtype(entrypoint, [s_list_of_strings])
           t.config.translation.gc = "minimark"
           cbuild = CStandaloneBuilder(t, entrypoint, t.config,
                                       gcpolicy=FrameworkGcPolicy2)
      -    f = py.test.raises(Exception, cbuild.generate_graphs_for_llinterp)
      +    with py.test.raises(Exception) as f:
      +        cbuild.build_database()
           expected = "'no_collect' function can trigger collection:  0:
               size = rffi.cast(rffi.SIZE_T, length)
      @@ -85,15 +85,24 @@
               return loader
       
       def reraise_error(error, loader):
      -    if   error == 1: raise OSError(load_int(loader), "external error")
      -    elif error == 2: raise IOError
      -    elif error == 3: raise OverflowError
      -    elif error == 4: raise ValueError
      -    elif error == 5: raise ZeroDivisionError
      -    elif error == 6: raise MemoryError
      -    elif error == 7: raise KeyError
      -    elif error == 8: raise IndexError
      -    else:            raise RuntimeError
      +    if error == 1:
      +        raise OSError(load_int(loader), "external error")
      +    elif error == 2:
      +        raise IOError
      +    elif error == 3:
      +        raise OverflowError
      +    elif error == 4:
      +        raise ValueError
      +    elif error == 5:
      +        raise ZeroDivisionError
      +    elif error == 6:
      +        raise MemoryError
      +    elif error == 7:
      +        raise KeyError
      +    elif error == 8:
      +        raise IndexError
      +    else:
      +        raise RuntimeError
       
       
       @signature(types.str(), returns=types.impossible())
      @@ -101,51 +110,46 @@
           STDERR = 2
           with rffi.scoped_str2charp(msg + '\n') as buf:
               writeall_not_sandboxed(STDERR, buf, len(msg) + 1)
      -    raise RuntimeError(msg)  # XXX in RPython, the msg is ignored at the moment
      +    raise RuntimeError(msg)  # XXX in RPython, the msg is ignored
      +
      +def make_stub(fnname, msg):
      +    """Build always-raising stub function to replace unsupported external."""
      +    log.WARNING(msg)
      +
      +    def execute(*args):
      +        not_implemented_stub(msg)
      +    execute.__name__ = 'sandboxed_%s' % (fnname,)
      +    return execute
      +
      +def sig_ll(fnobj):
      +    FUNCTYPE = lltype.typeOf(fnobj)
      +    args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS]
      +    s_result = lltype_to_annotation(FUNCTYPE.RESULT)
      +    return args_s, s_result
       
       dump_string = rmarshal.get_marshaller(str)
      -load_int    = rmarshal.get_loader(int)
      +load_int = rmarshal.get_loader(int)
       
      -def get_external_function_sandbox_graph(fnobj, db, force_stub=False):
      -    """Build the graph of a helper trampoline function to be used
      -    in place of real calls to the external function 'fnobj'.  The
      -    trampoline marshals its input arguments, dumps them to STDOUT,
      -    and waits for an answer on STDIN.
      +def get_sandbox_stub(fnobj, rtyper):
      +    fnname = fnobj._name
      +    args_s, s_result = sig_ll(fnobj)
      +    msg = "Not implemented: sandboxing for external function '%s'" % (fnname,)
      +    execute = make_stub(fnname, msg)
      +    return _annotate(rtyper, execute, args_s, s_result)
      +
      +def make_sandbox_trampoline(fnname, args_s, s_result):
      +    """Create a trampoline function with the specified signature.
      +
      +    The trampoline is meant to be used in place of real calls to the external
      +    function named 'fnname'.  It marshals its input arguments, dumps them to
      +    STDOUT, and waits for an answer on STDIN.
           """
      -    if getattr(getattr(fnobj, '_callable', None),
      -               '_sandbox_external_name', None):
      -        fnname = fnobj._callable._sandbox_external_name
      -    else:
      -        fnname = fnobj._name
      -    if hasattr(fnobj, 'graph'):
      -        # get the annotation of the input arguments and the result
      -        graph = fnobj.graph
      -        annotator = db.translator.annotator
      -        args_s = [annotator.binding(v) for v in graph.getargs()]
      -        s_result = annotator.binding(graph.getreturnvar())
      -    else:
      -        # pure external function - fall back to the annotations
      -        # corresponding to the ll types
      -        FUNCTYPE = lltype.typeOf(fnobj)
      -        args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS]
      -        s_result = lltype_to_annotation(FUNCTYPE.RESULT)
      -
           try:
      -        if force_stub:   # old case - don't try to support suggested_primitive
      -            raise NotImplementedError("sandboxing for external function '%s'"
      -                                      % (fnname,))
      -
               dump_arguments = rmarshal.get_marshaller(tuple(args_s))
               load_result = rmarshal.get_loader(s_result)
      -
      -    except (NotImplementedError,
      -            rmarshal.CannotMarshal,
      -            rmarshal.CannotUnmarshall), e:
      -        msg = 'Not Implemented: %s' % (e,)
      -        log.WARNING(msg)
      -        def execute(*args):
      -            not_implemented_stub(msg)
      -
      +    except (rmarshal.CannotMarshal, rmarshal.CannotUnmarshall) as e:
      +        msg = "Cannot sandbox function '%s': %s" % (fnname, e)
      +        execute = make_stub(fnname, msg)
           else:
               def execute(*args):
                   # marshal the function name and input arguments
      @@ -158,9 +162,12 @@
                   result = load_result(loader)
                   loader.check_finished()
                   return result
      -    execute = func_with_new_name(execute, 'sandboxed_' + fnname)
      +        execute.__name__ = 'sandboxed_%s' % (fnname,)
      +    return execute
       
      -    ann = MixLevelHelperAnnotator(db.translator.rtyper)
      -    graph = ann.getgraph(execute, args_s, s_result)
      +
      +def _annotate(rtyper, f, args_s, s_result):
      +    ann = MixLevelHelperAnnotator(rtyper)
      +    graph = ann.getgraph(f, args_s, s_result)
           ann.finish()
           return graph
      
      From pypy.commits at gmail.com  Thu Jan 28 18:38:34 2016
      From: pypy.commits at gmail.com (mjacob)
      Date: Thu, 28 Jan 2016 15:38:34 -0800 (PST)
      Subject: [pypy-commit] pypy py3k: Fix import for renamed module rposix ->
       rposix_stat.
      Message-ID: <56aaa67a.c177c20a.46fc9.ffffe116@mx.google.com>
      
      Author: Manuel Jacob 
      Branch: py3k
      Changeset: r82001:ae38513dafd2
      Date: 2016-01-28 23:32 +0100
      http://bitbucket.org/pypy/pypy/changeset/ae38513dafd2/
      
      Log:	Fix import for renamed module rposix -> rposix_stat.
      
      diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
      --- a/pypy/module/imp/importing.py
      +++ b/pypy/module/imp/importing.py
      @@ -15,7 +15,7 @@
       from rpython.rlib.streamio import StreamErrors
       from rpython.rlib.objectmodel import we_are_translated, specialize
       from rpython.rlib.signature import signature
      -from rpython.rlib import rposix, types
      +from rpython.rlib import rposix_stat, types
       from pypy.module.sys.version import PYPY_VERSION
       
       _WIN32 = sys.platform == 'win32'
      @@ -465,7 +465,7 @@
       
               # Directory should not exist
               try:
      -            st = rposix.stat(_WIN32Path(path) if win32 else path)
      +            st = rposix_stat.stat(_WIN32Path(path) if win32 else path)
               except OSError:
                   pass
               else:
      
      From pypy.commits at gmail.com  Fri Jan 29 04:32:22 2016
      From: pypy.commits at gmail.com (plan_rich)
      Date: Fri, 29 Jan 2016 01:32:22 -0800 (PST)
      Subject: [pypy-commit] pypy s390x-backend: splitting tests in zarch backend,
       should speed up things quite a bit
      Message-ID: <56ab31a6.c96cc20a.32d30.6272@mx.google.com>
      
      Author: Richard Plangger 
      Branch: s390x-backend
      Changeset: r82002:986957b12f49
      Date: 2016-01-29 10:30 +0100
      http://bitbucket.org/pypy/pypy/changeset/986957b12f49/
      
      Log:	splitting tests in zarch backend, should speed up things quite a bit
      
      diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py
      --- a/pypy/testrunner_cfg.py
      +++ b/pypy/testrunner_cfg.py
      @@ -5,6 +5,7 @@
           'translator/c', 'rlib',
           'memory/test', 'jit/metainterp',
           'jit/backend/arm', 'jit/backend/x86',
      +    'jit/backend/zarch',
       ]
       
       def collect_one_testdir(testdirs, reldir, tests):
      
      From pypy.commits at gmail.com  Fri Jan 29 09:07:44 2016
      From: pypy.commits at gmail.com (mattip)
      Date: Fri, 29 Jan 2016 06:07:44 -0800 (PST)
      Subject: [pypy-commit] pypy cffi-embedding-win32: first stab at fixing
       embedding for win32
      Message-ID: <56ab7230.4e8e1c0a.7088e.ffffb701@mx.google.com>
      
      Author: mattip 
      Branch: cffi-embedding-win32
      Changeset: r82003:237a8c5c0202
      Date: 2016-01-29 16:06 +0200
      http://bitbucket.org/pypy/pypy/changeset/237a8c5c0202/
      
      Log:	first stab at fixing embedding for win32
      
      diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py
      --- a/pypy/module/_cffi_backend/embedding.py
      +++ b/pypy/module/_cffi_backend/embedding.py
      @@ -84,11 +84,68 @@
           return rffi.cast(rffi.INT, res)
       
       # ____________________________________________________________
      +if os.name == 'nt':
      +    do_startup = r'''
      +#include 
      +#define WIN32_LEAN_AND_MEAN
      +#include 
      +RPY_EXPORTED void rpython_startup_code(void);
      +RPY_EXPORTED int pypy_setup_home(char *, int);
       
      +static unsigned char _cffi_ready = 0;
      +static const char *volatile _cffi_module_name;
       
      -eci = ExternalCompilationInfo(separate_module_sources=[
      -r"""
      -/* XXX Windows missing */
      +static void _cffi_init_error(const char *msg, const char *extra)
      +{
      +    fprintf(stderr,
      +            "\nPyPy initialization failure when loading module '%s':\n%s%s\n",
      +            _cffi_module_name, msg, extra);
      +}
      +
      +BOOL CALLBACK _cffi_init(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContex)
      +{
      +
      +    HMODULE hModule;
      +    TCHAR home[_MAX_PATH];
      +    rpython_startup_code();
      +    RPyGilAllocate();
      +
      +    GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | 
      +                       GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
      +                       (LPCTSTR)&_cffi_init, &hModule);
      +    if (hModule == 0 ) {
      +        /* TODO turn the int into a string with FormatMessage */
      +        
      +        _cffi_init_error("dladdr() failed: ", "");
      +        return TRUE;
      +    }
      +    GetModuleFileName(hModule, home, _MAX_PATH);
      +    if (pypy_setup_home(home, 1) != 0) {
      +        _cffi_init_error("pypy_setup_home() failed", "");
      +        return TRUE;
      +    }
      +    _cffi_ready = 1;
      +    fprintf(stderr, "startup succeeded, home %s\n", home);
      +    return TRUE;
      +}
      +
      +RPY_EXPORTED
      +int pypy_carefully_make_gil(const char *name)
      +{
      +    /* For CFFI: this initializes the GIL and loads the home path.
      +       It can be called completely concurrently from unrelated threads.
      +       It assumes that we don't hold the GIL before (if it exists), and we
      +       don't hold it afterwards.
      +    */
      +    static INIT_ONCE s_init_once;
      +
      +    _cffi_module_name = name;    /* not really thread-safe, but better than
      +                                    nothing */
      +    InitOnceExecuteOnce(&s_init_once, _cffi_init, NULL, NULL);
      +    return (int)_cffi_ready - 1;
      +}'''
      +else:
      +    do_startup = r"""
       #include 
       #include 
       #include 
      @@ -141,6 +198,7 @@
           pthread_once(&once_control, _cffi_init);
           return (int)_cffi_ready - 1;
       }
      -"""])
      +"""
      +eci = ExternalCompilationInfo(separate_module_sources=[do_startup])
       
       declare_c_function = rffi.llexternal_use_eci(eci)
      
      From pypy.commits at gmail.com  Fri Jan 29 15:51:45 2016
      From: pypy.commits at gmail.com (arigo)
      Date: Fri, 29 Jan 2016 12:51:45 -0800 (PST)
      Subject: [pypy-commit] extradoc extradoc: Slides
      Message-ID: <56abd0e1.c177c20a.46fc9.72d0@mx.google.com>
      
      Author: Armin Rigo 
      Branch: extradoc
      Changeset: r5597:a7b5322ad0b4
      Date: 2016-01-29 21:51 +0100
      http://bitbucket.org/pypy/extradoc/changeset/a7b5322ad0b4/
      
      Log:	Slides
      
      diff --git a/talk/fosdem2016/slides b/talk/fosdem2016/slides
      new file mode 100644
      --- /dev/null
      +++ b/talk/fosdem2016/slides
      @@ -0,0 +1,405 @@
      +=============
      +CFFI and PyPy
      +=============
      +
      +
      +CFFI
      +====
      +
      +* successful project according to PyPI
      +
      +* 3.4 million downloads last month
      +
      +* total 19.2 millions, 27th place on `pypi-ranking.info`
      +
      +  - Django is 28th
      +
      +* some high-visibility projects have switched to it (Cryptography)
      +
      +
      +PyPy
      +====
      +
      +* harder to say, but probably not so successful
      +
      +* more later
      +
      +
      +CFFI
      +====
      +
      +
      +
      +CFFI
      +====
      +
      +* call C from Python
      +
      +* CFFI = C Foreign Function Interface
      +
      +* shares ideas from Cython, ctypes, and LuaJIT's FFI
      +
      +
      +CFFI demo
      +=========
      +
      +::
      +
      +  $ man getpwuid
      +
      +  SYNOPSIS
      +         #include 
      +         #include 
      +
      +         struct passwd *getpwnam(const char *name);
      +
      +
      +CFFI demo
      +=========
      +
      +::
      +
      +       .
      +       .
      +       .
      +       The passwd structure is defined in  as follows:
      +
      +           struct passwd {
      +               char   *pw_name;       /* username */
      +               char   *pw_passwd;     /* user password */
      +               uid_t   pw_uid;        /* user ID */
      +       .
      +       .
      +       .
      +
      +
      +CFFI demo
      +=========
      +
      +::
      +
      +  from cffi import FFI
      +  ffi = cffi.FFI()
      +
      +  ffi.cdef("""
      +      typedef int... uid_t;
      +      struct passwd {
      +          uid_t pw_uid;
      +          ...;
      +      };
      +      struct passwd *getpwnam(const char *name);
      +  """)
      +
      +
      +CFFI demo
      +=========
      +
      +::
      +
      +  ffi.set_source("_pwuid_cffi", """
      +      #include 
      +      #include 
      +  """)
      +
      +  ffi.compile()
      +
      +------- ^^ put that in pwuid_build.py
      +
      +
      +CFFI demo
      +=========
      +
      +::
      +
      +  python pwuid_build.py
      +
      +creates ``_pwuid_cffi.so``
      +
      +
      +CFFI demo
      +=========
      +
      +::
      +
      +  from _pwuid_cffi import lib
      +
      +  print lib.getpwnam("arigo").pw_uid
      +
      +
      +CFFI demo
      +=========
      +
      +::
      +
      +  from _pwuid_cffi import ffi, lib
      +
      +* ``lib`` gives access to all functions from the cdef
      +
      +* ``ffi`` gives access to a few general helpers, e.g.
      +
      +   - ``ffi.cast("float", 42)``
      +
      +   - ``p = ffi.new("struct passwd *")``
      +
      +   - ``p = ffi.new("char[10]"); p[0] = 'X'; s = lib.getpwnam(p)``
      +
      +   - ``p = ffi.new_handle(random_obj); ...; random_obj = ffi.from_handle(p)``
      +
      +
      +CFFI
      +====
      +
      +* supports more or less the whole C
      +
      +* there is more than my short explanation suggests
      +
      +* read the docs: http://cffi.readthedocs.org/
      +
      +
      +
      +PyPy
      +====
      +
      +
      +PyPy
      +====
      +
      +* a Python interpreter
      +
      +* different from the standard, which is CPython
      +
      +* main goal of PyPy: speed
      +
      +
      +PyPy
      +====
      +
      +::
      +
      +    $ pypy
      +
      +    Python 2.7.10 (5f8302b8bf9f, Nov 18 2015, 10:46:46)
      +    [PyPy 4.0.1 with GCC 4.8.4] on linux2
      +    Type "help", "copyright", "credits" or "license" for more information.
      +    >>>> 2+3
      +    5
      +    >>>>
      +
      +
      +PyPy
      +====
      +
      +* run ``pypy my_program.py``
      +
      +* starts working like an interpreter
      +
      +* then a Just-in-Time Compiler kicks in
      +
      +* generate and execute machine code from the Python program
      +
      +* good or great speed-ups for the majority of long-running code
      +
      +
      +PyPy
      +====
      +
      +* different techniques than CPython also for "garbage collection"
      +
      +* works very well (arguably better than CPython's reference counting)
      +
      +
      +PyPy: Garbage Collection
      +========================
      +
      +* "**moving,** generational, incremental GC"
      +
      +* objects don't have reference counters
      +
      +* allocated in a "nursery"
      +
      +* when nursery full, find surviving nursery objects and move them out
      +
      +* usually work on nursery objects only (fast), but rarely also perform
      +  a full GC
      +
      +
      +PyPy: C extensions
      +==================
      +
      +* PyPy works great for running Python
      +
      +* less great when there are CPython C extension modules involved
      +
      +
      +PyPy: C extensions
      +==================
      +
      +* not directly possible: we have moving, non-reference-counted objects,
      +  and the C code expects non-moving, reference-counted objects
      +
      +
      +PyPy: C extensions
      +==================
      +
      +* PyPy has still some support for them, called its ``cpyext`` module
      +
      +* similar to IronPython's Ironclad
      +
      +* emulate all objects for C extensions with a shadow, non-movable,
      +  reference-counted object
      +
      +
      +PyPy: C extensions
      +==================
      +
      +* ``cpyext`` is slow
      +
      +* ``cpyext`` is actually *really, really* slow
      +
      +  - but we're working on making it *only* slow
      +
      +
      +PyPy: C extensions
      +==================
      +
      +* ``cpyext`` will "often" work, but there are a some high-profile C
      +  extension modules that are not supported so far
      +
      +* notably, ``numpy``
      +
      +* (it is future work)
      +
      +
      +PyPy: ad
      +========
      +
      +* but, hey, if you need performance out of Python and don't rely
      +  critically on C extension modules, then give PyPy a try
      +
      +  - typical area where it works well: web services
      +
      +
      +CPython C API: the problem
      +==========================
      +
      +* CPython comes with a C API
      +
      +* very large number of functions
      +
      +* assumes objects don't move
      +
      +* assumes a "reference counting" model
      +
      +
      +CPython C API
      +=============
      +
      +* actually, the API is some large subset of the functions inside
      +  CPython itself
      +
      +
      +CPython C API
      +=============
      +
      +* easy to use from C
      +
      +* historically, part of the success of Python
      +
      +
      +CPython C API
      +=============
      +
      +* further successful tools build on top of that API:
      +
      +  - SWIG
      +  - Cython
      +  - and other binding generators
      +  - now CFFI
      +
      +
      +CFFI
      +====
      +
      +* but CFFI is a bit different
      +
      +  - it does not expose any part of the CPython C API
      +
      +  - everything is done with a different, minimal API on the ``ffi`` object
      +    which is closer to C
      +
      +    - ``ffi.cast()``, ``ffi.new()``, etc.
      +
      +  - that means it can be directly ported
      +
      +
      +CFFI and PyPy
      +=============
      +
      +* we have a PyPy version of CFFI
      +
      +* the demos I have given above work equally well on CPython or on PyPy
      +
      +* (supporting PyPy was part of the core motivation behind CFFI)
      +
      +
      +CFFI: performance
      +=================
      +
      +* in PyPy, JIT compiler speeds up calls, so it's very fast
      +
      +* in CPython, it doesn't occur, but it is still reasonable when
      +  compared with alternatives
      +
      +* main issue is that we write more code in Python with CFFI,
      +  which makes it slower on CPython---but not really on PyPy
      +
      +
      +CFFI: summary
      +=============
      +
      +* call C from Python
      +
      +* works natively on CPython and on PyPy
      +
      +  - and easy to port to other Python implementations
      +
      +* supports CPython 2.6, 2.7, 3.2 to 3.5, and
      +  is integrated with PyPy
      +
      +
      +CFFI
      +====
      +
      +* independent on the particular details of the Python implementation
      +
      +  - using CFFI, you call C functions and manipulate C-pointer-like
      +    objects directly from Python
      +
      +  - you do in Python all logic involving Python objects
      +
      +  - there are no (official) ways around this API to call the CPython C
      +    API, and none are needed
      +
      +
      +CFFI
      +====
      +
      +* two reasons to switch to it ``:-)``
      +
      +  - easy and cool
      +
      +  - better supported on non-CPython implementations
      +
      +
      +CFFI: latest news
      +=================
      +
      +* support for "embedding" Python inside some other non-Python program
      +
      +  - now you really never need the CPython C API any more
      +
      +
      +CFFI
      +====
      +
      +http://cffi.readthedocs.org/
      
      From pypy.commits at gmail.com  Sat Jan 30 09:40:38 2016
      From: pypy.commits at gmail.com (arigo)
      Date: Sat, 30 Jan 2016 06:40:38 -0800 (PST)
      Subject: [pypy-commit] pypy.org extradoc: Add a warning: pypy3 is slow and
       old
      Message-ID: <56accb66.2815c20a.9e4b8.7371@mx.google.com>
      
      Author: Armin Rigo 
      Branch: extradoc
      Changeset: r698:d5286b71511a
      Date: 2016-01-30 15:40 +0100
      http://bitbucket.org/pypy/pypy.org/changeset/d5286b71511a/
      
      Log:	Add a warning: pypy3 is slow and old
      
      diff --git a/download.html b/download.html
      --- a/download.html
      +++ b/download.html
      @@ -134,6 +134,9 @@
       
    • Python 3.2.5 compatible PyPy3 2.4.0

      +

      Warning: this is (1) based on an old release of PyPy, and (2) only +supporting the Python 3.2 language. It's also known to be +(sometimes much) slower than PyPy 2.

      • Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS) (see [1] below)
      • Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
      • diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -109,6 +109,12 @@ Python 3.2.5 compatible PyPy3 2.4.0 ----------------------------------- +.. class:: download_menu + + Warning: this is (1) based on an old release of PyPy, and (2) only + supporting the Python 3.2 language. It's also known to be + (sometimes much) slower than PyPy 2. + * `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS)`__ (see ``[1]`` below) * `Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian)`__ (see ``[1]`` below) From pypy.commits at gmail.com Sat Jan 30 11:56:24 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 30 Jan 2016 08:56:24 -0800 (PST) Subject: [pypy-commit] pypy vmprof-newstack: add a dummy write_all_objects Message-ID: <56aceb38.a3f6c20a.2ed55.ffff9fee@mx.google.com> Author: fijal Branch: vmprof-newstack Changeset: r82004:286b6293be5e Date: 2016-01-30 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/286b6293be5e/ Log: add a dummy write_all_objects diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py --- a/pypy/module/_vmprof/__init__.py +++ b/pypy/module/_vmprof/__init__.py @@ -11,6 +11,7 @@ interpleveldefs = { 'enable': 'interp_vmprof.enable', 'disable': 'interp_vmprof.disable', + 'write_all_code_objects': 'interp_vmprof.write_all_code_objects', 'VMProfError': 'space.fromcache(interp_vmprof.Cache).w_VMProfError', } diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -69,6 +69,11 @@ except rvmprof.VMProfError, e: raise VMProfError(space, e) +def write_all_code_objects(space): + """ Needed on cpython, just empty function here + """ + pass + def disable(space): """Disable vmprof. Remember to close the file descriptor afterwards if necessary. From pypy.commits at gmail.com Sat Jan 30 13:42:35 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 30 Jan 2016 10:42:35 -0800 (PST) Subject: [pypy-commit] pypy SomeRange: Separate bk.newrange() from bk.newlist() Message-ID: <56ad041b.c2351c0a.a9cbc.38bc@mx.google.com> Author: Ronan Lamy Branch: SomeRange Changeset: r82006:02f319beadff Date: 2016-01-30 04:47 +0000 http://bitbucket.org/pypy/pypy/changeset/02f319beadff/ Log: Separate bk.newrange() from bk.newlist() diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -180,15 +180,18 @@ listdef.listitem.__dict__.update(flags_if_new) return listdef - def newlist(self, *s_values, **flags): + def newlist(self, *s_values): """Make a SomeList associated with the current position, general enough to contain the s_values as items.""" - listdef = self.getlistdef(**flags) + listdef = self.getlistdef() for s_value in s_values: listdef.generalize(s_value) - if flags: - assert flags.keys() == ['range_step'] - listdef.generalize_range_step(flags['range_step']) + return SomeList(listdef) + + def newrange(self, s_item, step): + listdef = self.getlistdef(range_step=step) + listdef.generalize(s_item) + listdef.generalize_range_step(step) return SomeList(listdef) def getdictdef(self, is_r_dict=False, force_non_null=False): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -80,7 +80,7 @@ nonneg = s_stop.nonneg or (s_stop.is_constant() and s_stop.const >= -1) s_item = SomeInteger(nonneg=nonneg) - return getbookkeeper().newlist(s_item, range_step=step) + return getbookkeeper().newrange(s_item, step) builtin_xrange = builtin_range # xxx for now allow it From pypy.commits at gmail.com Sat Jan 30 13:42:39 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 30 Jan 2016 10:42:39 -0800 (PST) Subject: [pypy-commit] pypy SomeRange: Create SomeRange class Message-ID: <56ad041f.11301c0a.4a215.6b16@mx.google.com> Author: Ronan Lamy Branch: SomeRange Changeset: r82008:273c3ace813a Date: 2016-01-30 17:40 +0000 http://bitbucket.org/pypy/pypy/changeset/273c3ace813a/ Log: Create SomeRange class diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -13,7 +13,8 @@ SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeException, - SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, + SomeImpossibleValue, SomeUnicodeString, SomeList, SomeRange, + HarmlesslyBlocked, SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty) from rpython.annotator.classdesc import ClassDef, ClassDesc from rpython.annotator.listdef import ListDef, ListItem @@ -190,7 +191,7 @@ listdef.listitem.range_step = step listdef.generalize(s_item) listdef.generalize_range_step(step) - return SomeList(listdef) + return SomeRange(listdef) def getdictdef(self, is_r_dict=False, force_non_null=False): """Get the DictDef associated with the current position.""" diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -359,6 +359,9 @@ def noneify(self): return SomeList(self.listdef) +class SomeRange(SomeList): + pass + class SomeTuple(SomeObject): "Stands for a tuple of known length." diff --git a/rpython/rtyper/rrange.py b/rpython/rtyper/rrange.py --- a/rpython/rtyper/rrange.py +++ b/rpython/rtyper/rrange.py @@ -89,6 +89,9 @@ v_step = self._getstep(v_rng, hop) return hop.gendirectcall(ll_rangelen, v_rng, v_step) + def __eq__(self, other): + return other.__class__ is self.__class__ and other.__dict__ == self.__dict__ + class __extend__(pairtype(RangeRepr, IntegerRepr)): From pypy.commits at gmail.com Sat Jan 30 13:42:40 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 30 Jan 2016 10:42:40 -0800 (PST) Subject: [pypy-commit] pypy SomeRange: Make range() lists immutable Message-ID: <56ad0420.cb371c0a.d64e2.36dd@mx.google.com> Author: Ronan Lamy Branch: SomeRange Changeset: r82009:3b3f8cea104f Date: 2016-01-30 18:16 +0000 http://bitbucket.org/pypy/pypy/changeset/3b3f8cea104f/ Log: Make range() lists immutable diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -9,7 +9,7 @@ SomeDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeException, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None, - SomeByteArray, SomeWeakRef, SomeSingleFloat, + SomeByteArray, SomeWeakRef, SomeSingleFloat, SomeRange, SomeLongFloat, SomeType, SomeTypeOf, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) @@ -486,6 +486,12 @@ return lst1 inplace_mul.can_only_throw = [] +class __extend__(pairtype(SomeRange, SomeObject)): + + def inplace_mul((lst1, obj2)): + raise AnnotatorError( + "In RPython, lists returned by range() are immutable") + class __extend__(pairtype(SomeTuple, SomeTuple)): def union((tup1, tup2)): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -14,7 +14,7 @@ SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, SomeFloat, SomeIterator, SomePBC, SomeNone, SomeTypeOf, s_ImpossibleValue, s_Bool, s_None, s_Int, unionof, add_knowntypedata, - SomeWeakRef, SomeUnicodeString, SomeByteArray) + SomeWeakRef, SomeUnicodeString, SomeByteArray, SomeRange) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator.binaryop import _clone ## XXX where to put this? from rpython.annotator.binaryop import _dict_can_only_throw_keyerror @@ -429,6 +429,32 @@ check_negative_slice(s_start, s_stop) self.listdef.resize() +class __extend__(SomeRange): + + def method_append(self, s_value): + raise AnnotatorError( + "In RPython, lists returned by range() are immutable") + + def method_extend(self, s_iterable): + raise AnnotatorError( + "In RPython, lists returned by range() are immutable") + + def method_reverse(self): + raise AnnotatorError( + "In RPython, lists returned by range() are immutable") + + def method_insert(self, s_index, s_value): + raise AnnotatorError( + "In RPython, lists returned by range() are immutable") + + def method_remove(self, s_value): + raise AnnotatorError( + "In RPython, lists returned by range() are immutable") + + def method_pop(self, s_index=None): + raise AnnotatorError( + "In RPython, lists returned by range() are immutable") + def check_negative_slice(s_start, s_stop, error="slicing"): if isinstance(s_start, SomeInteger) and not s_start.nonneg: raise AnnotatorError("%s: not proven to have non-negative start" % From pypy.commits at gmail.com Sat Jan 30 13:42:37 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 30 Jan 2016 10:42:37 -0800 (PST) Subject: [pypy-commit] pypy SomeRange: Inline bk.getlistdef() Message-ID: <56ad041d.2457c20a.d484b.ffffca67@mx.google.com> Author: Ronan Lamy Branch: SomeRange Changeset: r82007:b1921df13e50 Date: 2016-01-30 06:03 +0000 http://bitbucket.org/pypy/pypy/changeset/b1921df13e50/ Log: Inline bk.getlistdef() diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -171,25 +171,23 @@ clsdefs = {self.getuniqueclassdef(cls) for cls in exc_classes} return SomeException(clsdefs) - def getlistdef(self, **flags_if_new): - """Get the ListDef associated with the current position.""" + def newlist(self, *s_values): + """Make a SomeList associated with the current position, general + enough to contain the s_values as items.""" try: listdef = self.listdefs[self.position_key] except KeyError: listdef = self.listdefs[self.position_key] = ListDef(self) - listdef.listitem.__dict__.update(flags_if_new) - return listdef - - def newlist(self, *s_values): - """Make a SomeList associated with the current position, general - enough to contain the s_values as items.""" - listdef = self.getlistdef() for s_value in s_values: listdef.generalize(s_value) return SomeList(listdef) def newrange(self, s_item, step): - listdef = self.getlistdef(range_step=step) + try: + listdef = self.listdefs[self.position_key] + except KeyError: + listdef = self.listdefs[self.position_key] = ListDef(self) + listdef.listitem.range_step = step listdef.generalize(s_item) listdef.generalize_range_step(step) return SomeList(listdef) From pypy.commits at gmail.com Sat Jan 30 13:42:33 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 30 Jan 2016 10:42:33 -0800 (PST) Subject: [pypy-commit] pypy SomeRange: Merge rpython.rtyper.lltypesystem.rrange into rpython.rtyper.rrange Message-ID: <56ad0419.8378c20a.4e5cb.ffffc96f@mx.google.com> Author: Ronan Lamy Branch: SomeRange Changeset: r82005:d458d86b6ce2 Date: 2016-01-29 19:08 +0000 http://bitbucket.org/pypy/pypy/changeset/d458d86b6ce2/ Log: Merge rpython.rtyper.lltypesystem.rrange into rpython.rtyper.rrange diff --git a/rpython/rtyper/lltypesystem/rrange.py b/rpython/rtyper/lltypesystem/rrange.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rrange.py +++ /dev/null @@ -1,98 +0,0 @@ -from rpython.rtyper.lltypesystem.lltype import Ptr, GcStruct, Signed, malloc, Void -from rpython.rtyper.rrange import AbstractRangeRepr, AbstractRangeIteratorRepr -from rpython.rtyper.error import TyperError - -# ____________________________________________________________ -# -# Concrete implementation of RPython lists that are returned by range() -# and never mutated afterwards: -# -# struct range { -# Signed start, stop; // step is always constant -# } -# -# struct rangest { -# Signed start, stop, step; // rare case, for completeness -# } - -def ll_length(l): - if l.step > 0: - lo = l.start - hi = l.stop - step = l.step - else: - lo = l.stop - hi = l.start - step = -l.step - if hi <= lo: - return 0 - n = (hi - lo - 1) // step + 1 - return n - -def ll_getitem_fast(l, index): - return l.start + index * l.step - -RANGEST = GcStruct("range", ("start", Signed), ("stop", Signed), ("step", Signed), - adtmeths = { - "ll_length":ll_length, - "ll_getitem_fast":ll_getitem_fast, - }, - hints = {'immutable': True}) -RANGESTITER = GcStruct("range", ("next", Signed), ("stop", Signed), ("step", Signed)) - -class RangeRepr(AbstractRangeRepr): - - RANGEST = Ptr(RANGEST) - RANGESTITER = Ptr(RANGESTITER) - - getfield_opname = "getfield" - - def __init__(self, step, *args): - self.RANGE = Ptr(GcStruct("range", ("start", Signed), ("stop", Signed), - adtmeths = { - "ll_length":ll_length, - "ll_getitem_fast":ll_getitem_fast, - "step":step, - }, - hints = {'immutable': True})) - self.RANGEITER = Ptr(GcStruct("range", ("next", Signed), ("stop", Signed))) - AbstractRangeRepr.__init__(self, step, *args) - self.ll_newrange = ll_newrange - self.ll_newrangest = ll_newrangest - - def make_iterator_repr(self, variant=None): - if variant is not None: - raise TyperError("unsupported %r iterator over a range list" % - (variant,)) - return RangeIteratorRepr(self) - - -def ll_newrange(RANGE, start, stop): - l = malloc(RANGE.TO) - l.start = start - l.stop = stop - return l - -def ll_newrangest(start, stop, step): - if step == 0: - raise ValueError - l = malloc(RANGEST) - l.start = start - l.stop = stop - l.step = step - return l - -class RangeIteratorRepr(AbstractRangeIteratorRepr): - - def __init__(self, *args): - AbstractRangeIteratorRepr.__init__(self, *args) - self.ll_rangeiter = ll_rangeiter - -def ll_rangeiter(ITERPTR, rng): - iter = malloc(ITERPTR.TO) - iter.next = rng.start - iter.stop = rng.stop - if ITERPTR.TO is RANGESTITER: - iter.step = rng.step - return iter - diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -44,7 +44,7 @@ s_value = listitem.s_value if (listitem.range_step is not None and not listitem.mutated and not isinstance(s_value, annmodel.SomeImpossibleValue)): - from rpython.rtyper.lltypesystem.rrange import RangeRepr + from rpython.rtyper.rrange import RangeRepr return RangeRepr(listitem.range_step) else: # cannot do the rtyper.getrepr() call immediately, for the case diff --git a/rpython/rtyper/rrange.py b/rpython/rtyper/rrange.py --- a/rpython/rtyper/rrange.py +++ b/rpython/rtyper/rrange.py @@ -1,19 +1,79 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import Signed, Void, Ptr +from rpython.rtyper.lltypesystem.lltype import ( + Ptr, GcStruct, Signed, malloc, Void) from rpython.rtyper.rlist import dum_nocheck, dum_checkidx from rpython.rtyper.rmodel import Repr, IteratorRepr from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype +# ____________________________________________________________ +# +# Concrete implementation of RPython lists that are returned by range() +# and never mutated afterwards: +# +# struct range { +# Signed start, stop; // step is always constant +# } +# +# struct rangest { +# Signed start, stop, step; // rare case, for completeness +# } -class AbstractRangeRepr(Repr): +def ll_length(l): + if l.step > 0: + lo = l.start + hi = l.stop + step = l.step + else: + lo = l.stop + hi = l.start + step = -l.step + if hi <= lo: + return 0 + n = (hi - lo - 1) // step + 1 + return n + +def ll_getitem_fast(l, index): + return l.start + index * l.step + +RANGEST = GcStruct("range", ("start", Signed), ("stop", Signed), ("step", Signed), + adtmeths = { + "ll_length":ll_length, + "ll_getitem_fast":ll_getitem_fast, + }, + hints = {'immutable': True}) +RANGESTITER = GcStruct("range", ("next", Signed), ("stop", Signed), ("step", Signed)) + +class RangeRepr(Repr): + + RANGEST = Ptr(RANGEST) + RANGESTITER = Ptr(RANGESTITER) + + getfield_opname = "getfield" + def __init__(self, step): + self.RANGE = Ptr(GcStruct("range", ("start", Signed), ("stop", Signed), + adtmeths = { + "ll_length":ll_length, + "ll_getitem_fast":ll_getitem_fast, + "step":step, + }, + hints = {'immutable': True})) + self.RANGEITER = Ptr(GcStruct("range", ("next", Signed), ("stop", Signed))) self.step = step if step != 0: self.lowleveltype = self.RANGE else: self.lowleveltype = self.RANGEST + self.ll_newrange = ll_newrange + self.ll_newrangest = ll_newrangest + + def make_iterator_repr(self, variant=None): + if variant is not None: + raise TyperError("unsupported %r iterator over a range list" % + (variant,)) + return RangeIteratorRepr(self) def _getstep(self, v_rng, hop): return hop.genop(self.getfield_opname, @@ -29,7 +89,9 @@ v_step = self._getstep(v_rng, hop) return hop.gendirectcall(ll_rangelen, v_rng, v_step) -class __extend__(pairtype(AbstractRangeRepr, IntegerRepr)): + + +class __extend__(pairtype(RangeRepr, IntegerRepr)): def rtype_getitem((r_rng, r_int), hop): if hop.has_implicit_exception(IndexError): @@ -89,6 +151,21 @@ index += length return l.start + index * step +def ll_newrange(RANGE, start, stop): + l = malloc(RANGE.TO) + l.start = start + l.stop = stop + return l + +def ll_newrangest(start, stop, step): + if step == 0: + raise ValueError + l = malloc(RANGEST) + l.start = start + l.stop = stop + l.step = step + return l + # ____________________________________________________________ # # Irregular operations. @@ -105,7 +182,7 @@ if isinstance(vstep, Constant) and vstep.value == 0: # not really needed, annotator catches it. Just in case... raise TyperError("range cannot have a const step of zero") - if isinstance(hop.r_result, AbstractRangeRepr): + if isinstance(hop.r_result, RangeRepr): if hop.r_result.step != 0: c_rng = hop.inputconst(Void, hop.r_result.RANGE) hop.exception_is_here() @@ -142,13 +219,14 @@ # # Iteration. -class AbstractRangeIteratorRepr(IteratorRepr): +class RangeIteratorRepr(IteratorRepr): def __init__(self, r_rng): self.r_rng = r_rng if r_rng.step != 0: self.lowleveltype = r_rng.RANGEITER else: self.lowleveltype = r_rng.RANGESTITER + self.ll_rangeiter = ll_rangeiter def newiter(self, hop): v_rng, = hop.inputargs(self.r_rng) @@ -169,6 +247,14 @@ hop.exception_is_here() return hop.gendirectcall(llfn, v_iter, *args) +def ll_rangeiter(ITERPTR, rng): + iter = malloc(ITERPTR.TO) + iter.next = rng.start + iter.stop = rng.stop + if ITERPTR.TO is RANGESTITER: + iter.step = rng.step + return iter + def ll_rangenext_up(iter, step): next = iter.next if next >= iter.stop: diff --git a/rpython/rtyper/test/test_rrange.py b/rpython/rtyper/test/test_rrange.py --- a/rpython/rtyper/test/test_rrange.py +++ b/rpython/rtyper/test/test_rrange.py @@ -1,6 +1,7 @@ from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.rrange import ll_rangelen, ll_rangeitem, ll_rangeitem_nonneg, dum_nocheck -from rpython.rtyper.lltypesystem import rrange +from rpython.rtyper.rrange import ( + ll_rangelen, ll_rangeitem, ll_rangeitem_nonneg, dum_nocheck, ll_newrangest, + RangeRepr, ll_newrange) from rpython.rtyper.test.tool import BaseRtypingTest @@ -11,11 +12,11 @@ expected = range(start, stop, step) length = len(expected) if varstep: - l = rrange.ll_newrangest(start, stop, step) + l = ll_newrangest(start, stop, step) step = l.step else: - RANGE = rrange.RangeRepr(step).RANGE - l = rrange.ll_newrange(RANGE, start, stop) + RANGE = RangeRepr(step).RANGE + l = ll_newrange(RANGE, start, stop) assert ll_rangelen(l, step) == length lst = [ll_rangeitem(dum_nocheck, l, i, step) for i in range(length)] assert lst == expected From pypy.commits at gmail.com Sat Jan 30 16:46:26 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 30 Jan 2016 13:46:26 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: updated slides, check in the html version Message-ID: <56ad2f32.ccaa1c0a.adf90.6aa1@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5598:8b46adc6da63 Date: 2016-01-30 22:46 +0100 http://bitbucket.org/pypy/extradoc/changeset/8b46adc6da63/ Log: updated slides, check in the html version diff --git a/talk/fosdem2016/slides b/talk/fosdem2016/slides --- a/talk/fosdem2016/slides +++ b/talk/fosdem2016/slides @@ -324,7 +324,7 @@ - it does not expose any part of the CPython C API - - everything is done with a different, minimal API on the ``ffi`` object + - everything is done with a minimal API on the ``ffi`` object which is closer to C - ``ffi.cast()``, ``ffi.new()``, etc. diff --git a/talk/fosdem2016/slides.html b/talk/fosdem2016/slides.html new file mode 100644 --- /dev/null +++ b/talk/fosdem2016/slides.html @@ -0,0 +1,728 @@ + + + + + + + +CFFI and PyPy + + + + + + + + + + + + + + +
        +
        +
        + + +
        +
        +
        +

        CFFI and PyPy

        + + +
        +
        +

        CFFI

        +
          +
        • successful project according to PyPI
        • +
        • 3.4 million downloads last month
        • +
        • total 19.2 millions, 27th place on pypi-ranking.info
            +
          • Django is 28th
          • +
          +
        • +
        • some high-visibility projects have switched to it (Cryptography)
        • +
        +
        +
        +

        PyPy

        +
          +
        • harder to say, but probably not so successful
        • +
        • more later
        • +
        +
        +
        +

        CFFI

        +
        +
        +

        CFFI

        +
          +
        • call C from Python
        • +
        • CFFI = C Foreign Function Interface
        • +
        • shares ideas from Cython, ctypes, and LuaJIT's FFI
        • +
        +
        +
        +

        CFFI demo

        +
        +$ man getpwuid
        +
        +SYNOPSIS
        +       #include <sys/types.h>
        +       #include <pwd.h>
        +
        +       struct passwd *getpwnam(const char *name);
        +
        +
        +
        +

        CFFI demo

        +
        +.
        +.
        +.
        +The passwd structure is defined in <pwd.h> as follows:
        +
        +    struct passwd {
        +        char   *pw_name;       /* username */
        +        char   *pw_passwd;     /* user password */
        +        uid_t   pw_uid;        /* user ID */
        +.
        +.
        +.
        +
        +
        +
        +

        CFFI demo

        +
        +from cffi import FFI
        +ffi = cffi.FFI()
        +
        +ffi.cdef("""
        +    typedef int... uid_t;
        +    struct passwd {
        +        uid_t pw_uid;
        +        ...;
        +    };
        +    struct passwd *getpwnam(const char *name);
        +""")
        +
        +
        +
        +

        CFFI demo

        +
        +ffi.set_source("_pwuid_cffi", """
        +    #include <sys/types.h>
        +    #include <pwd.h>
        +""")
        +
        +ffi.compile()
        +
        +

        ------- ^^ put that in pwuid_build.py

        +
        +
        +

        CFFI demo

        +
        +python pwuid_build.py
        +
        +

        creates _pwuid_cffi.so

        +
        +
        +

        CFFI demo

        +
        +from _pwuid_cffi import lib
        +
        +print lib.getpwnam("arigo").pw_uid
        +
        +
        +
        +

        CFFI demo

        +
        +from _pwuid_cffi import ffi, lib
        +
        +
          +
        • lib gives access to all functions from the cdef

          +
        • +
        • ffi gives access to a few general helpers, e.g.

          +
          +
            +
          • ffi.cast("float", 42)
          • +
          • p = ffi.new("struct passwd *")
          • +
          • p = ffi.new("char[10]"); p[0] = 'X'; s = lib.getpwnam(p)
          • +
          • p = ffi.new_handle(random_obj); ...; random_obj = ffi.from_handle(p)
          • +
          +
          +
        • +
        +
        +
        +

        CFFI

        + +
        +
        +

        PyPy

        +
        +
        +

        PyPy

        +
          +
        • a Python interpreter
        • +
        • different from the standard, which is CPython
        • +
        • main goal of PyPy: speed
        • +
        +
        +
        +

        PyPy

        +
        +$ pypy
        +
        +Python 2.7.10 (5f8302b8bf9f, Nov 18 2015, 10:46:46)
        +[PyPy 4.0.1 with GCC 4.8.4] on linux2
        +Type "help", "copyright", "credits" or "license" for more information.
        +>>>> 2+3
        +5
        +>>>>
        +
        +
        +
        +

        PyPy

        +
          +
        • run pypy my_program.py
        • +
        • starts working like an interpreter
        • +
        • then a Just-in-Time Compiler kicks in
        • +
        • generate and execute machine code from the Python program
        • +
        • good or great speed-ups for the majority of long-running code
        • +
        +
        +
        +

        PyPy

        +
          +
        • different techniques than CPython also for "garbage collection"
        • +
        • works very well (arguably better than CPython's reference counting)
        • +
        +
        +
        +

        PyPy: Garbage Collection

        +
          +
        • "moving, generational, incremental GC"
        • +
        • objects don't have reference counters
        • +
        • allocated in a "nursery"
        • +
        • when nursery full, find surviving nursery objects and move them out
        • +
        • usually work on nursery objects only (fast), but rarely also perform +a full GC
        • +
        +
        +
        +

        PyPy: C extensions

        +
          +
        • PyPy works great for running Python
        • +
        • less great when there are CPython C extension modules involved
        • +
        +
        +
        +

        PyPy: C extensions

        +
          +
        • not directly possible: we have moving, non-reference-counted objects, +and the C code expects non-moving, reference-counted objects
        • +
        +
        +
        +

        PyPy: C extensions

        +
          +
        • PyPy has still some support for them, called its cpyext module
        • +
        • similar to IronPython's Ironclad
        • +
        • emulate all objects for C extensions with a shadow, non-movable, +reference-counted object
        • +
        +
        +
        +

        PyPy: C extensions

        +
          +
        • cpyext is slow
        • +
        • cpyext is actually really, really slow
            +
          • but we're working on making it only slow
          • +
          +
        • +
        +
        +
        +

        PyPy: C extensions

        +
          +
        • cpyext will "often" work, but there are a some high-profile C +extension modules that are not supported so far
        • +
        • notably, numpy
        • +
        • (it is future work)
        • +
        +
        +
        +

        PyPy: ad

        +
          +
        • but, hey, if you need performance out of Python and don't rely +critically on C extension modules, then give PyPy a try
            +
          • typical area where it works well: web services
          • +
          +
        • +
        +
        +
        +

        CPython C API: the problem

        +
          +
        • CPython comes with a C API
        • +
        • very large number of functions
        • +
        • assumes objects don't move
        • +
        • assumes a "reference counting" model
        • +
        +
        +
        +

        CPython C API

        +
          +
        • actually, the API is some large subset of the functions inside +CPython itself
        • +
        +
        +
        +

        CPython C API

        +
          +
        • easy to use from C
        • +
        • historically, part of the success of Python
        • +
        +
        +
        +

        CPython C API

        +
          +
        • further successful tools build on top of that API:
            +
          • SWIG
          • +
          • Cython
          • +
          • and other binding generators
          • +
          • now CFFI
          • +
          +
        • +
        +
        +
        +

        CFFI

        +
          +
        • but CFFI is a bit different
            +
          • it does not expose any part of the CPython C API
          • +
          • everything is done with a minimal API on the ffi object +which is closer to C
              +
            • ffi.cast(), ffi.new(), etc.
            • +
            +
          • +
          • that means it can be directly ported
          • +
          +
        • +
        +
        +
        +

        CFFI and PyPy

        +
          +
        • we have a PyPy version of CFFI
        • +
        • the demos I have given above work equally well on CPython or on PyPy
        • +
        • (supporting PyPy was part of the core motivation behind CFFI)
        • +
        +
        +
        +

        CFFI: performance

        +
          +
        • in PyPy, JIT compiler speeds up calls, so it's very fast
        • +
        • in CPython, it doesn't occur, but it is still reasonable when +compared with alternatives
        • +
        • main issue is that we write more code in Python with CFFI, +which makes it slower on CPython---but not really on PyPy
        • +
        +
        +
        +

        CFFI: summary

        +
          +
        • call C from Python
        • +
        • works natively on CPython and on PyPy
            +
          • and easy to port to other Python implementations
          • +
          +
        • +
        • supports CPython 2.6, 2.7, 3.2 to 3.5, and +is integrated with PyPy
        • +
        +
        +
        +

        CFFI

        +
          +
        • independent on the particular details of the Python implementation
            +
          • using CFFI, you call C functions and manipulate C-pointer-like +objects directly from Python
          • +
          • you do in Python all logic involving Python objects
          • +
          • there are no (official) ways around this API to call the CPython C +API, and none are needed
          • +
          +
        • +
        +
        +
        +

        CFFI

        +
          +
        • two reasons to switch to it :-)
            +
          • easy and cool
          • +
          • better supported on non-CPython implementations
          • +
          +
        • +
        +
        +
        +

        CFFI: latest news

        +
          +
        • support for "embedding" Python inside some other non-Python program
            +
          • now you really never need the CPython C API any more
          • +
          +
        • +
        +
        + +
        + + From pypy.commits at gmail.com Sun Jan 31 10:21:05 2016 From: pypy.commits at gmail.com (devin.jeanpierre) Date: Sun, 31 Jan 2016 07:21:05 -0800 (PST) Subject: [pypy-commit] pypy default: Cast to PyObject* in PyString_GET_SIZE/AS_STRING. Message-ID: <56ae2661.01cdc20a.16151.ffffe72a@mx.google.com> Author: Devin Jeanpierre Branch: Changeset: r82010:f1c5a5f9e0b8 Date: 2016-01-31 02:58 -0800 http://bitbucket.org/pypy/pypy/changeset/f1c5a5f9e0b8/ Log: Cast to PyObject* in PyString_GET_SIZE/AS_STRING. This way we accept PyStringObject*, much like CPython casts to PyStringObject* in order to accept PyObject*. diff --git a/pypy/module/cpyext/include/stringobject.h b/pypy/module/cpyext/include/stringobject.h --- a/pypy/module/cpyext/include/stringobject.h +++ b/pypy/module/cpyext/include/stringobject.h @@ -7,8 +7,8 @@ extern "C" { #endif -#define PyString_GET_SIZE(op) PyString_Size(op) -#define PyString_AS_STRING(op) PyString_AsString(op) +#define PyString_GET_SIZE(op) PyString_Size((PyObject*)(op)) +#define PyString_AS_STRING(op) PyString_AsString((PyObject*)(op)) typedef struct { PyObject_HEAD From pypy.commits at gmail.com Sun Jan 31 10:30:23 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 31 Jan 2016 07:30:23 -0800 (PST) Subject: [pypy-commit] cffi default: Issue #244: parse_type() calls the Parser logic but self._options used Message-ID: <56ae288f.a453c20a.d2467.00b7@mx.google.com> Author: Armin Rigo Branch: Changeset: r2615:6b198bbbad9f Date: 2016-01-31 16:29 +0100 http://bitbucket.org/cffi/cffi/changeset/6b198bbbad9f/ Log: Issue #244: parse_type() calls the Parser logic but self._options used to be None, crashing in corner cases diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -220,7 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._options = None + self._options = {} self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -374,7 +374,7 @@ def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) - if self._options['dllexport']: + if self._options.get('dllexport'): tag = 'dllexport_python ' elif self._inside_extern_python: tag = 'extern_python ' @@ -450,7 +450,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._options['override']: + if not self._options.get('override'): raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -729,7 +729,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._options['packed'] + tp.packed = self._options.get('packed') if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1846,3 +1846,8 @@ thread.start_new_thread(f, ()) time.sleep(1.5) assert seen == ['init!', 'init done'] + 6 * [7] + + def test_sizeof_struct_directly(self): + # only works with the Python FFI instances + ffi = FFI(backend=self.Backend()) + assert ffi.sizeof("struct{int a;}") == ffi.sizeof("int") From pypy.commits at gmail.com Sun Jan 31 11:12:19 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 31 Jan 2016 08:12:19 -0800 (PST) Subject: [pypy-commit] cffi embedding-pypy-win32: allow embedding tests to run on pypy win32 Message-ID: <56ae3263.a453c20a.d2467.0d29@mx.google.com> Author: mattip Branch: embedding-pypy-win32 Changeset: r2616:6b5048b944cf Date: 2016-01-30 20:50 +0200 http://bitbucket.org/cffi/cffi/changeset/6b5048b944cf/ Log: allow embedding tests to run on pypy win32 diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -549,24 +549,25 @@ if value not in lst: lst.append(value) # - if '__pypy__' in sys.builtin_module_names: - if hasattr(sys, 'prefix'): - import os - ensure('library_dirs', os.path.join(sys.prefix, 'bin')) - pythonlib = "pypy-c" + if sys.platform == "win32": + # XXX pypy should not reuse the same import library name + template = "python%d%d" + if hasattr(sys, 'gettotalrefcount'): + template += '_d' else: - if sys.platform == "win32": - template = "python%d%d" - if hasattr(sys, 'gettotalrefcount'): - template += '_d' + if '__pypy__' in sys.builtin_module_names: + if hasattr(sys, 'prefix'): + import os + ensure('library_dirs', os.path.join(sys.prefix, 'bin')) + pythonlib = "pypy-c" else: template = "python%d.%d" if sysconfig.get_config_var('DEBUG_EXT'): template += sysconfig.get_config_var('DEBUG_EXT') - pythonlib = (template % - (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) - if hasattr(sys, 'abiflags'): - pythonlib += sys.abiflags + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags ensure('libraries', pythonlib) if sys.platform == "win32": ensure('extra_link_args', '/MANIFEST') diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -118,12 +118,18 @@ def execute(self, name): path = self.get_path() env_extra = {'PYTHONPATH': prefix_pythonpath()} - libpath = os.environ.get('LD_LIBRARY_PATH') - if libpath: - libpath = path + ':' + libpath + if sys.platform == 'win32': + _path = os.environ.get('PATH') + # for libpypy-c.dll or Python27.dll + _path += ';' + os.path.split(sys.executable)[0] + env_extra['PATH'] = _path else: - libpath = path - env_extra['LD_LIBRARY_PATH'] = libpath + libpath = os.environ.get('LD_LIBRARY_PATH') + if libpath: + libpath = path + ':' + libpath + else: + libpath = path + env_extra['LD_LIBRARY_PATH'] = libpath print('running %r in %r' % (name, path)) executable_name = name if sys.platform == 'win32': From pypy.commits at gmail.com Sun Jan 31 15:12:50 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 31 Jan 2016 12:12:50 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix class name. Message-ID: <56ae6ac2.8e301c0a.511ad.ffffbc1b@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r82012:250569a438a0 Date: 2016-01-28 21:27 +0100 http://bitbucket.org/pypy/pypy/changeset/250569a438a0/ Log: Fix class name. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -275,7 +275,7 @@ return space.wrap(r) W_FilterFalse.typedef = TypeDef( - 'itertools.ifilterfalse', + 'itertools.filterfalse', __new__ = interp2app(W_FilterFalse___new__), __iter__ = interp2app(W_FilterFalse.iter_w), __next__ = interp2app(W_FilterFalse.next_w), From pypy.commits at gmail.com Sun Jan 31 15:12:48 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 31 Jan 2016 12:12:48 -0800 (PST) Subject: [pypy-commit] pypy py3.3: pickle support for itertools.takewhile and dropwhile. Message-ID: <56ae6ac0.42cbc20a.1c2fa.443b@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r82011:6e3ddc38f7c0 Date: 2016-01-28 21:21 +0100 http://bitbucket.org/pypy/pypy/changeset/6e3ddc38f7c0/ Log: pickle support for itertools.takewhile and dropwhile. diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -148,7 +148,7 @@ def __init__(self, space, w_predicate, w_iterable): self.space = space self.w_predicate = w_predicate - self.iterable = space.iter(w_iterable) + self.w_iterable = space.iter(w_iterable) self.stopped = False def iter_w(self): @@ -158,7 +158,7 @@ if self.stopped: raise OperationError(self.space.w_StopIteration, self.space.w_None) - w_obj = self.space.next(self.iterable) # may raise a w_StopIteration + w_obj = self.space.next(self.w_iterable) # may raise a w_StopIteration w_bool = self.space.call_function(self.w_predicate, w_obj) if not self.space.is_true(w_bool): self.stopped = True @@ -166,6 +166,16 @@ return w_obj + def descr_reduce(self, space): + return space.newtuple([ + space.type(self), + space.newtuple([self.w_predicate, self.w_iterable]), + space.wrap(self.stopped) + ]) + + def descr_setstate(self, space, w_state): + self.stopped = space.bool_w(w_state) + def W_TakeWhile___new__(space, w_subtype, w_predicate, w_iterable): r = space.allocate_instance(W_TakeWhile, w_subtype) r.__init__(space, w_predicate, w_iterable) @@ -177,6 +187,8 @@ __new__ = interp2app(W_TakeWhile___new__), __iter__ = interp2app(W_TakeWhile.iter_w), __next__ = interp2app(W_TakeWhile.next_w), + __reduce__ = interp2app(W_TakeWhile.descr_reduce), + __setstate__ = interp2app(W_TakeWhile.descr_setstate), __doc__ = """Make an iterator that returns elements from the iterable as long as the predicate is true. @@ -195,7 +207,7 @@ def __init__(self, space, w_predicate, w_iterable): self.space = space self.w_predicate = w_predicate - self.iterable = space.iter(w_iterable) + self.w_iterable = space.iter(w_iterable) self.started = False def iter_w(self): @@ -203,10 +215,10 @@ def next_w(self): if self.started: - w_obj = self.space.next(self.iterable) # may raise w_StopIteration + w_obj = self.space.next(self.w_iterable) # may raise w_StopIter else: while True: - w_obj = self.space.next(self.iterable) # may raise w_StopIter + w_obj = self.space.next(self.w_iterable) # may raise w_StopIter w_bool = self.space.call_function(self.w_predicate, w_obj) if not self.space.is_true(w_bool): self.started = True @@ -214,6 +226,16 @@ return w_obj + def descr_reduce(self, space): + return space.newtuple([ + space.type(self), + space.newtuple([self.w_predicate, self.w_iterable]), + space.wrap(self.started) + ]) + + def descr_setstate(self, space, w_state): + self.started = space.bool_w(w_state) + def W_DropWhile___new__(space, w_subtype, w_predicate, w_iterable): r = space.allocate_instance(W_DropWhile, w_subtype) r.__init__(space, w_predicate, w_iterable) @@ -225,6 +247,8 @@ __new__ = interp2app(W_DropWhile___new__), __iter__ = interp2app(W_DropWhile.iter_w), __next__ = interp2app(W_DropWhile.next_w), + __reduce__ = interp2app(W_DropWhile.descr_reduce), + __setstate__ = interp2app(W_DropWhile.descr_setstate), __doc__ = """Make an iterator that drops elements from the iterable as long as the predicate is true; afterwards, returns every element. Note, the iterator does not produce any output until the diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -836,12 +836,6 @@ "usemodules": ['itertools', 'struct', 'binascii'], } - def setup_class(cls): - if cls.space.is_true(cls.space.appexec([], """(): - import sys; return sys.version_info < (2, 7) - """)): - py.test.skip("Requires Python 2.7") - def test_compress(self): import itertools it = itertools.compress(['a', 'b', 'c'], [0, 1, 0]) @@ -1001,6 +995,16 @@ assert list(itertools.islice( pickle.loads(pickle.dumps(c)), 10)) == list('bcabcabcab') + def test_takewhile_pickle(self): + data = [1, 2, 3, 0, 4, 5, 6] + import itertools, pickle + t = itertools.takewhile(bool, data) + next(t) + assert list(pickle.loads(pickle.dumps(t))) == [2, 3] + t = itertools.dropwhile(bool, data) + next(t) + assert list(pickle.loads(pickle.dumps(t))) == [4, 5, 6] + class AppTestItertools32: spaceconfig = dict(usemodules=['itertools']) From pypy.commits at gmail.com Sun Jan 31 15:12:52 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 31 Jan 2016 12:12:52 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Pickle support for itertools.islice Message-ID: <56ae6ac4.624fc20a.d8ba2.4e9e@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r82013:a5416d54ea4e Date: 2016-01-31 21:11 +0100 http://bitbucket.org/pypy/pypy/changeset/a5416d54ea4e/ Log: Pickle support for itertools.islice diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -404,6 +404,15 @@ if num <= 0: break + def descr_reduce(self, space): + return space.newtuple([ + space.type(self), + space.newtuple([self.iterable, + space.wrap(self.start), + space.wrap(self.stop), + space.wrap(self.ignore + 1)]), + ]) + def W_ISlice___new__(space, w_subtype, w_iterable, w_startstop, args_w): r = space.allocate_instance(W_ISlice, w_subtype) r.__init__(space, w_iterable, w_startstop, args_w) @@ -414,6 +423,7 @@ __new__ = interp2app(W_ISlice___new__), __iter__ = interp2app(W_ISlice.iter_w), __next__ = interp2app(W_ISlice.next_w), + __reduce__ = interp2app(W_ISlice.descr_reduce), __doc__ = """Make an iterator that returns selected elements from the iterable. If start is non-zero, then elements from the iterable are skipped until start is reached. Afterward, elements are diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -988,6 +988,11 @@ assert list(op(testIntermediate)) == [ (0,1,3), (0,2,3), (1,2,3)] + def test_islice_pickle(self): + import itertools, pickle + it = itertools.islice(range(100), 10, 20, 3) + assert list(pickle.loads(pickle.dumps(it))) == list(range(100)[10:20:3]) + def test_cycle_pickle(self): import itertools, pickle c = itertools.cycle('abc') From pypy.commits at gmail.com Sun Jan 31 16:19:47 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 31 Jan 2016 13:19:47 -0800 (PST) Subject: [pypy-commit] pypy default: skip for msvc Message-ID: <56ae7a73.01adc20a.c27d9.5a99@mx.google.com> Author: mattip Branch: Changeset: r82014:aa367ba93c8a Date: 2016-01-31 23:03 +0200 http://bitbucket.org/pypy/pypy/changeset/aa367ba93c8a/ Log: skip for msvc diff --git a/rpython/rtyper/tool/test/test_rffi_platform.py b/rpython/rtyper/tool/test/test_rffi_platform.py --- a/rpython/rtyper/tool/test/test_rffi_platform.py +++ b/rpython/rtyper/tool/test/test_rffi_platform.py @@ -277,10 +277,14 @@ assert not rffi_platform.has("x", "#include ") def test_has_0002(): + if platform.name == 'msvc': + py.test.skip('no m.lib in msvc') assert rffi_platform.has("pow", "#include ", libraries=["m"]) def test_has_0003(): """multiple libraries""" + if platform.name == 'msvc': + py.test.skip('no m.lib in msvc') assert rffi_platform.has("pow", "#include ", libraries=["m", "c"]) def test_has_0004(): From pypy.commits at gmail.com Sun Jan 31 16:37:30 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 31 Jan 2016 13:37:30 -0800 (PST) Subject: [pypy-commit] pypy py3.3: For some reason this chunk was not copied from CPython 3.3 Message-ID: <56ae7e9a.6217c20a.5f363.70a6@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r82015:b28c0f7ae8f3 Date: 2016-01-31 21:27 +0100 http://bitbucket.org/pypy/pypy/changeset/b28c0f7ae8f3/ Log: For some reason this chunk was not copied from CPython 3.3 diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c +++ b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_tw.c @@ -55,7 +55,7 @@ TRYMAP_DEC(big5, **outbuf, c, IN2) { NEXT(2, 1) } - else return 2; + else return 1; } return 0; @@ -109,7 +109,7 @@ TRYMAP_DEC(cp950ext, **outbuf, c, IN2); else TRYMAP_DEC(big5, **outbuf, c, IN2); - else return 2; + else return 1; NEXT(2, 1) } From pypy.commits at gmail.com Sun Jan 31 16:37:32 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 31 Jan 2016 13:37:32 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Add _ssl.RAND_bytes Message-ID: <56ae7e9c.c9ebc20a.e381f.638f@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r82016:9a47d88b9973 Date: 2016-01-31 22:29 +0100 http://bitbucket.org/pypy/pypy/changeset/9a47d88b9973/ Log: Add _ssl.RAND_bytes diff --git a/pypy/module/_ssl/__init__.py b/pypy/module/_ssl/__init__.py --- a/pypy/module/_ssl/__init__.py +++ b/pypy/module/_ssl/__init__.py @@ -43,8 +43,8 @@ if HAVE_OPENSSL_RAND: Module.interpleveldefs['RAND_add'] = "interp_ssl.RAND_add" - Module.interpleveldefs['RAND_bytes'] = "space.w_None" # so far - Module.interpleveldefs['RAND_pseudo_bytes'] = "space.w_None" # so far + Module.interpleveldefs['RAND_bytes'] = "interp_ssl.RAND_bytes" + Module.interpleveldefs['RAND_pseudo_bytes'] = "interp_ssl.RAND_pseudo_bytes" Module.interpleveldefs['RAND_status'] = "interp_ssl.RAND_status" Module.interpleveldefs['RAND_egd'] = "interp_ssl.RAND_egd" diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -237,6 +237,43 @@ with rffi.scoped_str2charp(string) as buf: libssl_RAND_add(buf, len(string), entropy) + def _RAND_bytes(space, n, pseudo): + if n < 0: + raise OperationError(space.w_ValueError, space.wrap( + "num must be positive")) + + with rffi.scoped_alloc_buffer(n) as buf: + if pseudo: + ok = libssl_RAND_pseudo_bytes( + rffi.cast(rffi.UCHARP, buf.raw), n) + if ok == 0 or ok == 1: + return space.newtuple([ + space.wrapbytes(buf.str(n)), + space.wrap(ok == 1), + ]) + else: + ok = libssl_RAND_bytes( + rffi.cast(rffi.UCHARP, buf.raw), n) + if ok == 1: + return space.wrapbytes(buf.str(n)) + + raise ssl_error(space, "", errcode=libssl_ERR_get_error()) + + @unwrap_spec(n=int) + def RAND_bytes(space, n): + """RAND_bytes(n) -> bytes + + Generate n cryptographically strong pseudo-random bytes.""" + return _RAND_bytes(space, n, pseudo=False) + + @unwrap_spec(n=int) + def RAND_pseudo_bytes(space, n): + """RAND_pseudo_bytes(n) -> (bytes, is_cryptographic) + + Generate n pseudo-random bytes. is_cryptographic is True if the bytes + generated are cryptographically strong.""" + return _RAND_bytes(space, n, pseudo=True) + def RAND_status(space): """RAND_status() -> 0 or 1 diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -48,6 +48,16 @@ raises(TypeError, _ssl.RAND_add, "xyz", "zyx") _ssl.RAND_add("xyz", 1.2345) + def test_RAND_bytes(self): + import _ssl + b = _ssl.RAND_bytes(3) + assert type(b) is bytes + assert len(b) == 3 + b, ok = _ssl.RAND_pseudo_bytes(3) + assert type(b) is bytes + assert len(b) == 3 + assert ok is True or ok is False + def test_RAND_status(self): import _ssl if not hasattr(_ssl, "RAND_status"): diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -311,6 +311,8 @@ if HAVE_OPENSSL_RAND: ssl_external('RAND_add', [rffi.CCHARP, rffi.INT, rffi.DOUBLE], lltype.Void) + ssl_external('RAND_bytes', [rffi.UCHARP, rffi.INT], rffi.INT) + ssl_external('RAND_pseudo_bytes', [rffi.UCHARP, rffi.INT], rffi.INT) ssl_external('RAND_status', [], rffi.INT) if HAVE_OPENSSL_RAND_EGD: ssl_external('RAND_egd', [rffi.CCHARP], rffi.INT) From pypy.commits at gmail.com Sun Jan 31 17:47:19 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 31 Jan 2016 14:47:19 -0800 (PST) Subject: [pypy-commit] pypy default: duh, check the index first(!) Message-ID: <56ae8ef7.a151c20a.c3de6.ffff8690@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82018:3fe07a7b1c13 Date: 2016-01-31 19:10 +0100 http://bitbucket.org/pypy/pypy/changeset/3fe07a7b1c13/ Log: duh, check the index first(!) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -116,7 +116,7 @@ def _find_map_attr(self, name, index): while isinstance(self, PlainAttribute): - if name == self.name and index == self.index: + if index == self.index and name == self.name: return self self = self.back return None @@ -296,7 +296,7 @@ new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.index, w_value) def delete(self, obj, name, index): - if name == self.name and index == self.index: + if index == self.index and name == self.name: # ok, attribute is deleted if not self.ever_mutated: self.ever_mutated = True From pypy.commits at gmail.com Sun Jan 31 17:47:21 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 31 Jan 2016 14:47:21 -0800 (PST) Subject: [pypy-commit] pypy default: this comment is outdated now Message-ID: <56ae8ef9.2968c20a.c229f.ffff81fe@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82019:a71c1f3776a9 Date: 2016-01-31 19:11 +0100 http://bitbucket.org/pypy/pypy/changeset/a71c1f3776a9/ Log: this comment is outdated now diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -156,7 +156,6 @@ jit.isconstant(name) and jit.isconstant(index)) def add_attr(self, obj, name, index, w_value): - # grumble, jit needs this attr = self._get_new_attr(name, index) oldattr = obj._get_mapdict_map() if not jit.we_are_jitted(): From pypy.commits at gmail.com Sun Jan 31 17:47:17 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 31 Jan 2016 14:47:17 -0800 (PST) Subject: [pypy-commit] pypy default: fix typo Message-ID: <56ae8ef5.0357c20a.58781.7513@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82017:8a7217a547e2 Date: 2016-01-31 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/8a7217a547e2/ Log: fix typo diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -942,7 +942,7 @@ return False if w_set.length() == 0: return True - # it's possible to have 0-lenght strategy that's not empty + # it's possible to have 0-length strategy that's not empty if w_set.strategy is w_other.strategy: return self._issubset_unwrapped(w_set, w_other) if not self.may_contain_equal_elements(w_other.strategy): From pypy.commits at gmail.com Sun Jan 31 18:42:20 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 31 Jan 2016 15:42:20 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Fix unicode.capitalize() test to pass with CPython3.3, Message-ID: <56ae9bdc.a3abc20a.ddb4.ffff9400@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r82021:44aa48e4d16a Date: 2016-02-01 00:31 +0100 http://bitbucket.org/pypy/pypy/changeset/44aa48e4d16a/ Log: Fix unicode.capitalize() test to pass with CPython3.3, and implement it for PyPy. Probably not the fastest implementation... diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -217,7 +217,7 @@ # check that titlecased chars are lowered correctly # \u1ffc is the titlecased char assert ('\u1ff3\u1ff3\u1ffc\u1ffc'.capitalize() == - '\u1ffc\u1ff3\u1ff3\u1ff3') + '\u03a9\u0399\u1ff3\u1ff3\u1ff3') # check with cased non-letter chars assert ('\u24c5\u24ce\u24c9\u24bd\u24c4\u24c3'.capitalize() == '\u24c5\u24e8\u24e3\u24d7\u24de\u24dd') diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -155,13 +155,16 @@ return unicodedb.islinebreak(ord(ch)) def _upper(self, ch): - return unichr(unicodedb.toupper(ord(ch))) + return u''.join([unichr(x) for x in + unicodedb.toupper_full(ord(ch))]) def _lower(self, ch): - return unichr(unicodedb.tolower(ord(ch))) + return u''.join([unichr(x) for x in + unicodedb.tolower_full(ord(ch))]) def _title(self, ch): - return unichr(unicodedb.totitle(ord(ch))) + return u''.join([unichr(x) for x in + unicodedb.totitle_full(ord(ch))]) def _newlist_unwrapped(self, space, lst): return space.newlist_unicode(lst) From pypy.commits at gmail.com Sun Jan 31 18:42:22 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 31 Jan 2016 15:42:22 -0800 (PST) Subject: [pypy-commit] pypy py3.3: Another test that needed a change for CPython3.3 Message-ID: <56ae9bde.4c5c1c0a.24191.fffffab2@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r82022:f41f8a7c1fb8 Date: 2016-02-01 00:40 +0100 http://bitbucket.org/pypy/pypy/changeset/f41f8a7c1fb8/ Log: Another test that needed a change for CPython3.3 diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -729,7 +729,7 @@ assert 'ababa'.count('aba') == 1 def test_swapcase(self): - assert '\xe4\xc4\xdf'.swapcase() == '\xc4\xe4\xdf' + assert '\xe4\xc4\xdf'.swapcase() == '\xc4\xe4SS' def test_call_special_methods(self): # xxx not completely clear if these are implementation details or not From pypy.commits at gmail.com Sun Jan 31 18:42:18 2016 From: pypy.commits at gmail.com (amauryfa) Date: Sun, 31 Jan 2016 15:42:18 -0800 (PST) Subject: [pypy-commit] pypy py3.3: unicodedata: add tables for special_casing. Message-ID: <56ae9bda.8e301c0a.511ad.fffff8b0@mx.google.com> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r82020:c9f92e754efd Date: 2016-02-01 00:26 +0100 http://bitbucket.org/pypy/pypy/changeset/c9f92e754efd/ Log: unicodedata: add tables for special_casing. diff --git a/rpython/rlib/unicodedata/SpecialCasing-5.2.0.txt b/rpython/rlib/unicodedata/SpecialCasing-5.2.0.txt new file mode 100644 --- /dev/null +++ b/rpython/rlib/unicodedata/SpecialCasing-5.2.0.txt @@ -0,0 +1,273 @@ +# SpecialCasing-5.2.0.txt +# Date: 2009-09-22, 23:25:59 GMT [MD] +# +# Unicode Character Database +# Copyright (c) 1991-2009 Unicode, Inc. +# For terms of use, see http://www.unicode.org/terms_of_use.html +# For documentation, see http://www.unicode.org/reports/tr44/ +# +# Special Casing Properties +# +# This file is a supplement to the UnicodeData file. +# It contains additional information about the casing of Unicode characters. +# (For compatibility, the UnicodeData.txt file only contains case mappings for +# characters where they are 1-1, and independent of context and language. +# For more information, see the discussion of Case Mappings in the Unicode Standard. +# +# All code points not listed in this file that do not have a simple case mappings +# in UnicodeData.txt map to themselves. +# ================================================================================ +# Format +# ================================================================================ +# The entries in this file are in the following machine-readable format: +# +# ; ; ; <upper> ; (<condition_list> ;)? # <comment> +# +# <code>, <lower>, <title>, and <upper> provide character values in hex. If there is more +# than one character, they are separated by spaces. Other than as used to separate +# elements, spaces are to be ignored. +# +# The <condition_list> is optional. Where present, it consists of one or more language IDs +# or contexts, separated by spaces. In these conditions: +# - A condition list overrides the normal behavior if all of the listed conditions are true. +# - The context is always the context of the characters in the original string, +# NOT in the resulting string. +# - Case distinctions in the condition list are not significant. +# - Conditions preceded by "Not_" represent the negation of the condition. +# The condition list is not represented in the UCD as a formal property. +# +# A language ID is defined by BCP 47, with '-' and '_' treated equivalently. +# +# A context for a character C is defined by Section 3.13 Default Case +# Operations, of The Unicode Standard, Version 5.0. +# (This is identical to the context defined by Unicode 4.1.0, +# as specified in http://www.unicode.org/versions/Unicode4.1.0/) +# +# Parsers of this file must be prepared to deal with future additions to this format: +# * Additional contexts +# * Additional fields +# ================================================================================ +# @missing 0000..10FFFF; <slc>; <stc>; <suc> +# ================================================================================ +# Unconditional mappings +# ================================================================================ + +# The German es-zed is special--the normal mapping is to SS. +# Note: the titlecase should never occur in practice. It is equal to titlecase(uppercase(<es-zed>)) + +00DF; 00DF; 0053 0073; 0053 0053; # LATIN SMALL LETTER SHARP S + +# Preserve canonical equivalence for I with dot. Turkic is handled below. + +0130; 0069 0307; 0130; 0130; # LATIN CAPITAL LETTER I WITH DOT ABOVE + +# Ligatures + +FB00; FB00; 0046 0066; 0046 0046; # LATIN SMALL LIGATURE FF +FB01; FB01; 0046 0069; 0046 0049; # LATIN SMALL LIGATURE FI +FB02; FB02; 0046 006C; 0046 004C; # LATIN SMALL LIGATURE FL +FB03; FB03; 0046 0066 0069; 0046 0046 0049; # LATIN SMALL LIGATURE FFI +FB04; FB04; 0046 0066 006C; 0046 0046 004C; # LATIN SMALL LIGATURE FFL +FB05; FB05; 0053 0074; 0053 0054; # LATIN SMALL LIGATURE LONG S T +FB06; FB06; 0053 0074; 0053 0054; # LATIN SMALL LIGATURE ST + +0587; 0587; 0535 0582; 0535 0552; # ARMENIAN SMALL LIGATURE ECH YIWN +FB13; FB13; 0544 0576; 0544 0546; # ARMENIAN SMALL LIGATURE MEN NOW +FB14; FB14; 0544 0565; 0544 0535; # ARMENIAN SMALL LIGATURE MEN ECH +FB15; FB15; 0544 056B; 0544 053B; # ARMENIAN SMALL LIGATURE MEN INI +FB16; FB16; 054E 0576; 054E 0546; # ARMENIAN SMALL LIGATURE VEW NOW +FB17; FB17; 0544 056D; 0544 053D; # ARMENIAN SMALL LIGATURE MEN XEH + +# No corresponding uppercase precomposed character + +0149; 0149; 02BC 004E; 02BC 004E; # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +0390; 0390; 0399 0308 0301; 0399 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +03B0; 03B0; 03A5 0308 0301; 03A5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +01F0; 01F0; 004A 030C; 004A 030C; # LATIN SMALL LETTER J WITH CARON +1E96; 1E96; 0048 0331; 0048 0331; # LATIN SMALL LETTER H WITH LINE BELOW +1E97; 1E97; 0054 0308; 0054 0308; # LATIN SMALL LETTER T WITH DIAERESIS +1E98; 1E98; 0057 030A; 0057 030A; # LATIN SMALL LETTER W WITH RING ABOVE +1E99; 1E99; 0059 030A; 0059 030A; # LATIN SMALL LETTER Y WITH RING ABOVE +1E9A; 1E9A; 0041 02BE; 0041 02BE; # LATIN SMALL LETTER A WITH RIGHT HALF RING +1F50; 1F50; 03A5 0313; 03A5 0313; # GREEK SMALL LETTER UPSILON WITH PSILI +1F52; 1F52; 03A5 0313 0300; 03A5 0313 0300; # GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA +1F54; 1F54; 03A5 0313 0301; 03A5 0313 0301; # GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA +1F56; 1F56; 03A5 0313 0342; 03A5 0313 0342; # GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI +1FB6; 1FB6; 0391 0342; 0391 0342; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI +1FC6; 1FC6; 0397 0342; 0397 0342; # GREEK SMALL LETTER ETA WITH PERISPOMENI +1FD2; 1FD2; 0399 0308 0300; 0399 0308 0300; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA +1FD3; 1FD3; 0399 0308 0301; 0399 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA +1FD6; 1FD6; 0399 0342; 0399 0342; # GREEK SMALL LETTER IOTA WITH PERISPOMENI +1FD7; 1FD7; 0399 0308 0342; 0399 0308 0342; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI +1FE2; 1FE2; 03A5 0308 0300; 03A5 0308 0300; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA +1FE3; 1FE3; 03A5 0308 0301; 03A5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA +1FE4; 1FE4; 03A1 0313; 03A1 0313; # GREEK SMALL LETTER RHO WITH PSILI +1FE6; 1FE6; 03A5 0342; 03A5 0342; # GREEK SMALL LETTER UPSILON WITH PERISPOMENI +1FE7; 1FE7; 03A5 0308 0342; 03A5 0308 0342; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI +1FF6; 1FF6; 03A9 0342; 03A9 0342; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI + +# IMPORTANT-when iota-subscript (0345) is uppercased or titlecased, +# the result will be incorrect unless the iota-subscript is moved to the end +# of any sequence of combining marks. Otherwise, the accents will go on the capital iota. +# This process can be achieved by first transforming the text to NFC before casing. +# E.g. <alpha><iota_subscript><acute> is uppercased to <ALPHA><acute><IOTA> + +# The following cases are already in the UnicodeData file, so are only commented here. + +# 0345; 0345; 0345; 0399; # COMBINING GREEK YPOGEGRAMMENI + +# All letters with YPOGEGRAMMENI (iota-subscript) or PROSGEGRAMMENI (iota adscript) +# have special uppercases. +# Note: characters with PROSGEGRAMMENI are actually titlecase, not uppercase! + +1F80; 1F80; 1F88; 1F08 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI +1F81; 1F81; 1F89; 1F09 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI +1F82; 1F82; 1F8A; 1F0A 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1F83; 1F83; 1F8B; 1F0B 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1F84; 1F84; 1F8C; 1F0C 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1F85; 1F85; 1F8D; 1F0D 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1F86; 1F86; 1F8E; 1F0E 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1F87; 1F87; 1F8F; 1F0F 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1F88; 1F80; 1F88; 1F08 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI +1F89; 1F81; 1F89; 1F09 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI +1F8A; 1F82; 1F8A; 1F0A 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1F8B; 1F83; 1F8B; 1F0B 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1F8C; 1F84; 1F8C; 1F0C 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1F8D; 1F85; 1F8D; 1F0D 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1F8E; 1F86; 1F8E; 1F0E 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1F8F; 1F87; 1F8F; 1F0F 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1F90; 1F90; 1F98; 1F28 0399; # GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI +1F91; 1F91; 1F99; 1F29 0399; # GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI +1F92; 1F92; 1F9A; 1F2A 0399; # GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1F93; 1F93; 1F9B; 1F2B 0399; # GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1F94; 1F94; 1F9C; 1F2C 0399; # GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1F95; 1F95; 1F9D; 1F2D 0399; # GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1F96; 1F96; 1F9E; 1F2E 0399; # GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1F97; 1F97; 1F9F; 1F2F 0399; # GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1F98; 1F90; 1F98; 1F28 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI +1F99; 1F91; 1F99; 1F29 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI +1F9A; 1F92; 1F9A; 1F2A 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1F9B; 1F93; 1F9B; 1F2B 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1F9C; 1F94; 1F9C; 1F2C 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1F9D; 1F95; 1F9D; 1F2D 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1F9E; 1F96; 1F9E; 1F2E 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1F9F; 1F97; 1F9F; 1F2F 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1FA0; 1FA0; 1FA8; 1F68 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI +1FA1; 1FA1; 1FA9; 1F69 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI +1FA2; 1FA2; 1FAA; 1F6A 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1FA3; 1FA3; 1FAB; 1F6B 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1FA4; 1FA4; 1FAC; 1F6C 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1FA5; 1FA5; 1FAD; 1F6D 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1FA6; 1FA6; 1FAE; 1F6E 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1FA7; 1FA7; 1FAF; 1F6F 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1FA8; 1FA0; 1FA8; 1F68 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI +1FA9; 1FA1; 1FA9; 1F69 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI +1FAA; 1FA2; 1FAA; 1F6A 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1FAB; 1FA3; 1FAB; 1F6B 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1FAC; 1FA4; 1FAC; 1F6C 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1FAD; 1FA5; 1FAD; 1F6D 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1FAE; 1FA6; 1FAE; 1F6E 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1FAF; 1FA7; 1FAF; 1F6F 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1FB3; 1FB3; 1FBC; 0391 0399; # GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI +1FBC; 1FB3; 1FBC; 0391 0399; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI +1FC3; 1FC3; 1FCC; 0397 0399; # GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI +1FCC; 1FC3; 1FCC; 0397 0399; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI +1FF3; 1FF3; 1FFC; 03A9 0399; # GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI +1FFC; 1FF3; 1FFC; 03A9 0399; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI + +# Some characters with YPOGEGRAMMENI also have no corresponding titlecases + +1FB2; 1FB2; 1FBA 0345; 1FBA 0399; # GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI +1FB4; 1FB4; 0386 0345; 0386 0399; # GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI +1FC2; 1FC2; 1FCA 0345; 1FCA 0399; # GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI +1FC4; 1FC4; 0389 0345; 0389 0399; # GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI +1FF2; 1FF2; 1FFA 0345; 1FFA 0399; # GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI +1FF4; 1FF4; 038F 0345; 038F 0399; # GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI + +1FB7; 1FB7; 0391 0342 0345; 0391 0342 0399; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI +1FC7; 1FC7; 0397 0342 0345; 0397 0342 0399; # GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI +1FF7; 1FF7; 03A9 0342 0345; 03A9 0342 0399; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI + +# ================================================================================ +# Conditional Mappings +# The remainder of this file provides conditional casing data used to produce +# full case mappings. +# ================================================================================ +# Language-Insensitive Mappings +# These are characters whose full case mappings do not depend on language, but do +# depend on context (which characters come before or after). For more information +# see the header of this file and the Unicode Standard. +# ================================================================================ + +# Special case for final form of sigma + +03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA + +# Note: the following cases for non-final are already in the UnicodeData file. + +# 03A3; 03C3; 03A3; 03A3; # GREEK CAPITAL LETTER SIGMA +# 03C3; 03C3; 03A3; 03A3; # GREEK SMALL LETTER SIGMA +# 03C2; 03C2; 03A3; 03A3; # GREEK SMALL LETTER FINAL SIGMA + +# Note: the following cases are not included, since they would case-fold in lowercasing + +# 03C3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK SMALL LETTER SIGMA +# 03C2; 03C3; 03A3; 03A3; Not_Final_Sigma; # GREEK SMALL LETTER FINAL SIGMA + +# ================================================================================ +# Language-Sensitive Mappings +# These are characters whose full case mappings depend on language and perhaps also +# context (which characters come before or after). For more information +# see the header of this file and the Unicode Standard. +# ================================================================================ + +# Lithuanian + +# Lithuanian retains the dot in a lowercase i when followed by accents. + +# Remove DOT ABOVE after "i" with upper or titlecase + +0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE + +# Introduce an explicit dot above when lowercasing capital I's and J's +# whenever there are more accents above. +# (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek) + +0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I +004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J +012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK +00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE +00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE +0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE + +# ================================================================================ + +# Turkish and Azeri + +# I and i-dotless; I-dot and i are case pairs in Turkish and Azeri +# The following rules handle those cases. + +0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE +0130; 0069; 0130; 0130; az; # LATIN CAPITAL LETTER I WITH DOT ABOVE + +# When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i. +# This matches the behavior of the canonically equivalent I-dot_above + +0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE +0307; ; 0307; 0307; az After_I; # COMBINING DOT ABOVE + +# When lowercasing, unless an I is before a dot_above, it turns into a dotless i. + +0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I +0049; 0131; 0049; 0049; az Not_Before_Dot; # LATIN CAPITAL LETTER I + +# When uppercasing, i turns into a dotted capital I + +0069; 0069; 0130; 0130; tr; # LATIN SMALL LETTER I +0069; 0069; 0130; 0130; az; # LATIN SMALL LETTER I + +# Note: the following case is already in the UnicodeData file. + +# 0131; 0131; 0049; 0049; tr; # LATIN SMALL LETTER DOTLESS I + +# EOF diff --git a/rpython/rlib/unicodedata/SpecialCasing-6.0.0.txt b/rpython/rlib/unicodedata/SpecialCasing-6.0.0.txt new file mode 100644 --- /dev/null +++ b/rpython/rlib/unicodedata/SpecialCasing-6.0.0.txt @@ -0,0 +1,273 @@ +# SpecialCasing-6.0.0.txt +# Date: 2010-05-18, 00:49:39 GMT [MD] +# +# Unicode Character Database +# Copyright (c) 1991-2010 Unicode, Inc. +# For terms of use, see http://www.unicode.org/terms_of_use.html +# For documentation, see http://www.unicode.org/reports/tr44/ +# +# Special Casing Properties +# +# This file is a supplement to the UnicodeData file. +# It contains additional information about the casing of Unicode characters. +# (For compatibility, the UnicodeData.txt file only contains case mappings for +# characters where they are 1-1, and independent of context and language. +# For more information, see the discussion of Case Mappings in the Unicode Standard. +# +# All code points not listed in this file that do not have a simple case mappings +# in UnicodeData.txt map to themselves. +# ================================================================================ +# Format +# ================================================================================ +# The entries in this file are in the following machine-readable format: +# +# <code>; <lower> ; <title> ; <upper> ; (<condition_list> ;)? # <comment> +# +# <code>, <lower>, <title>, and <upper> provide character values in hex. If there is more +# than one character, they are separated by spaces. Other than as used to separate +# elements, spaces are to be ignored. +# +# The <condition_list> is optional. Where present, it consists of one or more language IDs +# or contexts, separated by spaces. In these conditions: +# - A condition list overrides the normal behavior if all of the listed conditions are true. +# - The context is always the context of the characters in the original string, +# NOT in the resulting string. +# - Case distinctions in the condition list are not significant. +# - Conditions preceded by "Not_" represent the negation of the condition. +# The condition list is not represented in the UCD as a formal property. +# +# A language ID is defined by BCP 47, with '-' and '_' treated equivalently. +# +# A context for a character C is defined by Section 3.13 Default Case +# Operations, of The Unicode Standard, Version 5.0. +# (This is identical to the context defined by Unicode 4.1.0, +# as specified in http://www.unicode.org/versions/Unicode4.1.0/) +# +# Parsers of this file must be prepared to deal with future additions to this format: +# * Additional contexts +# * Additional fields +# ================================================================================ +# @missing 0000..10FFFF; <slc>; <stc>; <suc> +# ================================================================================ +# Unconditional mappings +# ================================================================================ + +# The German es-zed is special--the normal mapping is to SS. +# Note: the titlecase should never occur in practice. It is equal to titlecase(uppercase(<es-zed>)) + +00DF; 00DF; 0053 0073; 0053 0053; # LATIN SMALL LETTER SHARP S + +# Preserve canonical equivalence for I with dot. Turkic is handled below. + +0130; 0069 0307; 0130; 0130; # LATIN CAPITAL LETTER I WITH DOT ABOVE + +# Ligatures + +FB00; FB00; 0046 0066; 0046 0046; # LATIN SMALL LIGATURE FF +FB01; FB01; 0046 0069; 0046 0049; # LATIN SMALL LIGATURE FI +FB02; FB02; 0046 006C; 0046 004C; # LATIN SMALL LIGATURE FL +FB03; FB03; 0046 0066 0069; 0046 0046 0049; # LATIN SMALL LIGATURE FFI +FB04; FB04; 0046 0066 006C; 0046 0046 004C; # LATIN SMALL LIGATURE FFL +FB05; FB05; 0053 0074; 0053 0054; # LATIN SMALL LIGATURE LONG S T +FB06; FB06; 0053 0074; 0053 0054; # LATIN SMALL LIGATURE ST + +0587; 0587; 0535 0582; 0535 0552; # ARMENIAN SMALL LIGATURE ECH YIWN +FB13; FB13; 0544 0576; 0544 0546; # ARMENIAN SMALL LIGATURE MEN NOW +FB14; FB14; 0544 0565; 0544 0535; # ARMENIAN SMALL LIGATURE MEN ECH +FB15; FB15; 0544 056B; 0544 053B; # ARMENIAN SMALL LIGATURE MEN INI +FB16; FB16; 054E 0576; 054E 0546; # ARMENIAN SMALL LIGATURE VEW NOW +FB17; FB17; 0544 056D; 0544 053D; # ARMENIAN SMALL LIGATURE MEN XEH + +# No corresponding uppercase precomposed character + +0149; 0149; 02BC 004E; 02BC 004E; # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +0390; 0390; 0399 0308 0301; 0399 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +03B0; 03B0; 03A5 0308 0301; 03A5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +01F0; 01F0; 004A 030C; 004A 030C; # LATIN SMALL LETTER J WITH CARON +1E96; 1E96; 0048 0331; 0048 0331; # LATIN SMALL LETTER H WITH LINE BELOW +1E97; 1E97; 0054 0308; 0054 0308; # LATIN SMALL LETTER T WITH DIAERESIS +1E98; 1E98; 0057 030A; 0057 030A; # LATIN SMALL LETTER W WITH RING ABOVE +1E99; 1E99; 0059 030A; 0059 030A; # LATIN SMALL LETTER Y WITH RING ABOVE +1E9A; 1E9A; 0041 02BE; 0041 02BE; # LATIN SMALL LETTER A WITH RIGHT HALF RING +1F50; 1F50; 03A5 0313; 03A5 0313; # GREEK SMALL LETTER UPSILON WITH PSILI +1F52; 1F52; 03A5 0313 0300; 03A5 0313 0300; # GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA +1F54; 1F54; 03A5 0313 0301; 03A5 0313 0301; # GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA +1F56; 1F56; 03A5 0313 0342; 03A5 0313 0342; # GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI +1FB6; 1FB6; 0391 0342; 0391 0342; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI +1FC6; 1FC6; 0397 0342; 0397 0342; # GREEK SMALL LETTER ETA WITH PERISPOMENI +1FD2; 1FD2; 0399 0308 0300; 0399 0308 0300; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA +1FD3; 1FD3; 0399 0308 0301; 0399 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA +1FD6; 1FD6; 0399 0342; 0399 0342; # GREEK SMALL LETTER IOTA WITH PERISPOMENI +1FD7; 1FD7; 0399 0308 0342; 0399 0308 0342; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI +1FE2; 1FE2; 03A5 0308 0300; 03A5 0308 0300; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA +1FE3; 1FE3; 03A5 0308 0301; 03A5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA +1FE4; 1FE4; 03A1 0313; 03A1 0313; # GREEK SMALL LETTER RHO WITH PSILI +1FE6; 1FE6; 03A5 0342; 03A5 0342; # GREEK SMALL LETTER UPSILON WITH PERISPOMENI +1FE7; 1FE7; 03A5 0308 0342; 03A5 0308 0342; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI +1FF6; 1FF6; 03A9 0342; 03A9 0342; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI + +# IMPORTANT-when iota-subscript (0345) is uppercased or titlecased, +# the result will be incorrect unless the iota-subscript is moved to the end +# of any sequence of combining marks. Otherwise, the accents will go on the capital iota. +# This process can be achieved by first transforming the text to NFC before casing. +# E.g. <alpha><iota_subscript><acute> is uppercased to <ALPHA><acute><IOTA> + +# The following cases are already in the UnicodeData file, so are only commented here. + +# 0345; 0345; 0345; 0399; # COMBINING GREEK YPOGEGRAMMENI + +# All letters with YPOGEGRAMMENI (iota-subscript) or PROSGEGRAMMENI (iota adscript) +# have special uppercases. +# Note: characters with PROSGEGRAMMENI are actually titlecase, not uppercase! + +1F80; 1F80; 1F88; 1F08 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI +1F81; 1F81; 1F89; 1F09 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI +1F82; 1F82; 1F8A; 1F0A 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1F83; 1F83; 1F8B; 1F0B 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1F84; 1F84; 1F8C; 1F0C 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1F85; 1F85; 1F8D; 1F0D 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1F86; 1F86; 1F8E; 1F0E 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1F87; 1F87; 1F8F; 1F0F 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1F88; 1F80; 1F88; 1F08 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI +1F89; 1F81; 1F89; 1F09 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI +1F8A; 1F82; 1F8A; 1F0A 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1F8B; 1F83; 1F8B; 1F0B 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1F8C; 1F84; 1F8C; 1F0C 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1F8D; 1F85; 1F8D; 1F0D 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1F8E; 1F86; 1F8E; 1F0E 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1F8F; 1F87; 1F8F; 1F0F 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1F90; 1F90; 1F98; 1F28 0399; # GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI +1F91; 1F91; 1F99; 1F29 0399; # GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI +1F92; 1F92; 1F9A; 1F2A 0399; # GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1F93; 1F93; 1F9B; 1F2B 0399; # GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1F94; 1F94; 1F9C; 1F2C 0399; # GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1F95; 1F95; 1F9D; 1F2D 0399; # GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1F96; 1F96; 1F9E; 1F2E 0399; # GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1F97; 1F97; 1F9F; 1F2F 0399; # GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1F98; 1F90; 1F98; 1F28 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI +1F99; 1F91; 1F99; 1F29 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI +1F9A; 1F92; 1F9A; 1F2A 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1F9B; 1F93; 1F9B; 1F2B 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1F9C; 1F94; 1F9C; 1F2C 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1F9D; 1F95; 1F9D; 1F2D 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1F9E; 1F96; 1F9E; 1F2E 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1F9F; 1F97; 1F9F; 1F2F 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1FA0; 1FA0; 1FA8; 1F68 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI +1FA1; 1FA1; 1FA9; 1F69 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI +1FA2; 1FA2; 1FAA; 1F6A 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1FA3; 1FA3; 1FAB; 1F6B 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1FA4; 1FA4; 1FAC; 1F6C 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1FA5; 1FA5; 1FAD; 1F6D 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1FA6; 1FA6; 1FAE; 1F6E 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1FA7; 1FA7; 1FAF; 1F6F 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1FA8; 1FA0; 1FA8; 1F68 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI +1FA9; 1FA1; 1FA9; 1F69 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI +1FAA; 1FA2; 1FAA; 1F6A 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1FAB; 1FA3; 1FAB; 1F6B 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1FAC; 1FA4; 1FAC; 1F6C 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1FAD; 1FA5; 1FAD; 1F6D 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1FAE; 1FA6; 1FAE; 1F6E 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1FAF; 1FA7; 1FAF; 1F6F 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1FB3; 1FB3; 1FBC; 0391 0399; # GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI +1FBC; 1FB3; 1FBC; 0391 0399; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI +1FC3; 1FC3; 1FCC; 0397 0399; # GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI +1FCC; 1FC3; 1FCC; 0397 0399; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI +1FF3; 1FF3; 1FFC; 03A9 0399; # GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI +1FFC; 1FF3; 1FFC; 03A9 0399; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI + +# Some characters with YPOGEGRAMMENI also have no corresponding titlecases + +1FB2; 1FB2; 1FBA 0345; 1FBA 0399; # GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI +1FB4; 1FB4; 0386 0345; 0386 0399; # GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI +1FC2; 1FC2; 1FCA 0345; 1FCA 0399; # GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI +1FC4; 1FC4; 0389 0345; 0389 0399; # GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI +1FF2; 1FF2; 1FFA 0345; 1FFA 0399; # GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI +1FF4; 1FF4; 038F 0345; 038F 0399; # GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI + +1FB7; 1FB7; 0391 0342 0345; 0391 0342 0399; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI +1FC7; 1FC7; 0397 0342 0345; 0397 0342 0399; # GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI +1FF7; 1FF7; 03A9 0342 0345; 03A9 0342 0399; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI + +# ================================================================================ +# Conditional Mappings +# The remainder of this file provides conditional casing data used to produce +# full case mappings. +# ================================================================================ +# Language-Insensitive Mappings +# These are characters whose full case mappings do not depend on language, but do +# depend on context (which characters come before or after). For more information +# see the header of this file and the Unicode Standard. +# ================================================================================ + +# Special case for final form of sigma + +03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA + +# Note: the following cases for non-final are already in the UnicodeData file. + +# 03A3; 03C3; 03A3; 03A3; # GREEK CAPITAL LETTER SIGMA +# 03C3; 03C3; 03A3; 03A3; # GREEK SMALL LETTER SIGMA +# 03C2; 03C2; 03A3; 03A3; # GREEK SMALL LETTER FINAL SIGMA + +# Note: the following cases are not included, since they would case-fold in lowercasing + +# 03C3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK SMALL LETTER SIGMA +# 03C2; 03C3; 03A3; 03A3; Not_Final_Sigma; # GREEK SMALL LETTER FINAL SIGMA + +# ================================================================================ +# Language-Sensitive Mappings +# These are characters whose full case mappings depend on language and perhaps also +# context (which characters come before or after). For more information +# see the header of this file and the Unicode Standard. +# ================================================================================ + +# Lithuanian + +# Lithuanian retains the dot in a lowercase i when followed by accents. + +# Remove DOT ABOVE after "i" with upper or titlecase + +0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE + +# Introduce an explicit dot above when lowercasing capital I's and J's +# whenever there are more accents above. +# (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek) + +0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I +004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J +012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK +00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE +00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE +0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE + +# ================================================================================ + +# Turkish and Azeri + +# I and i-dotless; I-dot and i are case pairs in Turkish and Azeri +# The following rules handle those cases. + +0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE +0130; 0069; 0130; 0130; az; # LATIN CAPITAL LETTER I WITH DOT ABOVE + +# When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i. +# This matches the behavior of the canonically equivalent I-dot_above + +0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE +0307; ; 0307; 0307; az After_I; # COMBINING DOT ABOVE + +# When lowercasing, unless an I is before a dot_above, it turns into a dotless i. + +0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I +0049; 0131; 0049; 0049; az Not_Before_Dot; # LATIN CAPITAL LETTER I + +# When uppercasing, i turns into a dotted capital I + +0069; 0069; 0130; 0130; tr; # LATIN SMALL LETTER I +0069; 0069; 0130; 0130; az; # LATIN SMALL LETTER I + +# Note: the following case is already in the UnicodeData file. + +# 0131; 0131; 0049; 0049; tr; # LATIN SMALL LETTER DOTLESS I + +# EOF diff --git a/rpython/rlib/unicodedata/SpecialCasing-6.2.0.txt b/rpython/rlib/unicodedata/SpecialCasing-6.2.0.txt new file mode 100644 --- /dev/null +++ b/rpython/rlib/unicodedata/SpecialCasing-6.2.0.txt @@ -0,0 +1,275 @@ +# SpecialCasing-6.2.0.txt +# Date: 2012-05-23, 20:35:15 GMT [MD] +# +# Unicode Character Database +# Copyright (c) 1991-2012 Unicode, Inc. +# For terms of use, see http://www.unicode.org/terms_of_use.html +# For documentation, see http://www.unicode.org/reports/tr44/ +# +# Special Casing Properties +# +# This file is a supplement to the UnicodeData file. +# It contains additional information about the casing of Unicode characters. +# (For compatibility, the UnicodeData.txt file only contains case mappings for +# characters where they are 1-1, and independent of context and language. +# For more information, see the discussion of Case Mappings in the Unicode Standard. +# +# All code points not listed in this file that do not have a simple case mappings +# in UnicodeData.txt map to themselves. +# ================================================================================ +# Format +# ================================================================================ +# The entries in this file are in the following machine-readable format: +# +# <code>; <lower> ; <title> ; <upper> ; (<condition_list> ;)? # <comment> +# +# <code>, <lower>, <title>, and <upper> provide character values in hex. If there is more +# than one character, they are separated by spaces. Other than as used to separate +# elements, spaces are to be ignored. +# +# The <condition_list> is optional. Where present, it consists of one or more language IDs +# or contexts, separated by spaces. In these conditions: +# - A condition list overrides the normal behavior if all of the listed conditions are true. +# - The context is always the context of the characters in the original string, +# NOT in the resulting string. +# - Case distinctions in the condition list are not significant. +# - Conditions preceded by "Not_" represent the negation of the condition. +# The condition list is not represented in the UCD as a formal property. +# +# A language ID is defined by BCP 47, with '-' and '_' treated equivalently. +# +# A context for a character C is defined by Section 3.13 Default Case +# Operations, of The Unicode Standard, Version 5.0. +# (This is identical to the context defined by Unicode 4.1.0, +# as specified in http://www.unicode.org/versions/Unicode4.1.0/) +# +# Parsers of this file must be prepared to deal with future additions to this format: +# * Additional contexts +# * Additional fields +# ================================================================================ + +# @missing: 0000..10FFFF; <slc>; <stc>; <suc>; + +# ================================================================================ +# Unconditional mappings +# ================================================================================ + +# The German es-zed is special--the normal mapping is to SS. +# Note: the titlecase should never occur in practice. It is equal to titlecase(uppercase(<es-zed>)) + +00DF; 00DF; 0053 0073; 0053 0053; # LATIN SMALL LETTER SHARP S + +# Preserve canonical equivalence for I with dot. Turkic is handled below. + +0130; 0069 0307; 0130; 0130; # LATIN CAPITAL LETTER I WITH DOT ABOVE + +# Ligatures + +FB00; FB00; 0046 0066; 0046 0046; # LATIN SMALL LIGATURE FF +FB01; FB01; 0046 0069; 0046 0049; # LATIN SMALL LIGATURE FI +FB02; FB02; 0046 006C; 0046 004C; # LATIN SMALL LIGATURE FL +FB03; FB03; 0046 0066 0069; 0046 0046 0049; # LATIN SMALL LIGATURE FFI +FB04; FB04; 0046 0066 006C; 0046 0046 004C; # LATIN SMALL LIGATURE FFL +FB05; FB05; 0053 0074; 0053 0054; # LATIN SMALL LIGATURE LONG S T +FB06; FB06; 0053 0074; 0053 0054; # LATIN SMALL LIGATURE ST + +0587; 0587; 0535 0582; 0535 0552; # ARMENIAN SMALL LIGATURE ECH YIWN +FB13; FB13; 0544 0576; 0544 0546; # ARMENIAN SMALL LIGATURE MEN NOW +FB14; FB14; 0544 0565; 0544 0535; # ARMENIAN SMALL LIGATURE MEN ECH +FB15; FB15; 0544 056B; 0544 053B; # ARMENIAN SMALL LIGATURE MEN INI +FB16; FB16; 054E 0576; 054E 0546; # ARMENIAN SMALL LIGATURE VEW NOW +FB17; FB17; 0544 056D; 0544 053D; # ARMENIAN SMALL LIGATURE MEN XEH + +# No corresponding uppercase precomposed character + +0149; 0149; 02BC 004E; 02BC 004E; # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +0390; 0390; 0399 0308 0301; 0399 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +03B0; 03B0; 03A5 0308 0301; 03A5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +01F0; 01F0; 004A 030C; 004A 030C; # LATIN SMALL LETTER J WITH CARON +1E96; 1E96; 0048 0331; 0048 0331; # LATIN SMALL LETTER H WITH LINE BELOW +1E97; 1E97; 0054 0308; 0054 0308; # LATIN SMALL LETTER T WITH DIAERESIS +1E98; 1E98; 0057 030A; 0057 030A; # LATIN SMALL LETTER W WITH RING ABOVE +1E99; 1E99; 0059 030A; 0059 030A; # LATIN SMALL LETTER Y WITH RING ABOVE +1E9A; 1E9A; 0041 02BE; 0041 02BE; # LATIN SMALL LETTER A WITH RIGHT HALF RING +1F50; 1F50; 03A5 0313; 03A5 0313; # GREEK SMALL LETTER UPSILON WITH PSILI +1F52; 1F52; 03A5 0313 0300; 03A5 0313 0300; # GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA +1F54; 1F54; 03A5 0313 0301; 03A5 0313 0301; # GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA +1F56; 1F56; 03A5 0313 0342; 03A5 0313 0342; # GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI +1FB6; 1FB6; 0391 0342; 0391 0342; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI +1FC6; 1FC6; 0397 0342; 0397 0342; # GREEK SMALL LETTER ETA WITH PERISPOMENI +1FD2; 1FD2; 0399 0308 0300; 0399 0308 0300; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA +1FD3; 1FD3; 0399 0308 0301; 0399 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA +1FD6; 1FD6; 0399 0342; 0399 0342; # GREEK SMALL LETTER IOTA WITH PERISPOMENI +1FD7; 1FD7; 0399 0308 0342; 0399 0308 0342; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI +1FE2; 1FE2; 03A5 0308 0300; 03A5 0308 0300; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA +1FE3; 1FE3; 03A5 0308 0301; 03A5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA +1FE4; 1FE4; 03A1 0313; 03A1 0313; # GREEK SMALL LETTER RHO WITH PSILI +1FE6; 1FE6; 03A5 0342; 03A5 0342; # GREEK SMALL LETTER UPSILON WITH PERISPOMENI +1FE7; 1FE7; 03A5 0308 0342; 03A5 0308 0342; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI +1FF6; 1FF6; 03A9 0342; 03A9 0342; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI + +# IMPORTANT-when iota-subscript (0345) is uppercased or titlecased, +# the result will be incorrect unless the iota-subscript is moved to the end +# of any sequence of combining marks. Otherwise, the accents will go on the capital iota. +# This process can be achieved by first transforming the text to NFC before casing. +# E.g. <alpha><iota_subscript><acute> is uppercased to <ALPHA><acute><IOTA> + +# The following cases are already in the UnicodeData file, so are only commented here. + +# 0345; 0345; 0345; 0399; # COMBINING GREEK YPOGEGRAMMENI + +# All letters with YPOGEGRAMMENI (iota-subscript) or PROSGEGRAMMENI (iota adscript) +# have special uppercases. +# Note: characters with PROSGEGRAMMENI are actually titlecase, not uppercase! + +1F80; 1F80; 1F88; 1F08 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI +1F81; 1F81; 1F89; 1F09 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI +1F82; 1F82; 1F8A; 1F0A 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1F83; 1F83; 1F8B; 1F0B 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1F84; 1F84; 1F8C; 1F0C 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1F85; 1F85; 1F8D; 1F0D 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1F86; 1F86; 1F8E; 1F0E 0399; # GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1F87; 1F87; 1F8F; 1F0F 0399; # GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1F88; 1F80; 1F88; 1F08 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI +1F89; 1F81; 1F89; 1F09 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI +1F8A; 1F82; 1F8A; 1F0A 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1F8B; 1F83; 1F8B; 1F0B 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1F8C; 1F84; 1F8C; 1F0C 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1F8D; 1F85; 1F8D; 1F0D 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1F8E; 1F86; 1F8E; 1F0E 0399; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1F8F; 1F87; 1F8F; 1F0F 0399; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1F90; 1F90; 1F98; 1F28 0399; # GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI +1F91; 1F91; 1F99; 1F29 0399; # GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI +1F92; 1F92; 1F9A; 1F2A 0399; # GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1F93; 1F93; 1F9B; 1F2B 0399; # GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1F94; 1F94; 1F9C; 1F2C 0399; # GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1F95; 1F95; 1F9D; 1F2D 0399; # GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1F96; 1F96; 1F9E; 1F2E 0399; # GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1F97; 1F97; 1F9F; 1F2F 0399; # GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1F98; 1F90; 1F98; 1F28 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI +1F99; 1F91; 1F99; 1F29 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI +1F9A; 1F92; 1F9A; 1F2A 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1F9B; 1F93; 1F9B; 1F2B 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1F9C; 1F94; 1F9C; 1F2C 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1F9D; 1F95; 1F9D; 1F2D 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1F9E; 1F96; 1F9E; 1F2E 0399; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1F9F; 1F97; 1F9F; 1F2F 0399; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1FA0; 1FA0; 1FA8; 1F68 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI +1FA1; 1FA1; 1FA9; 1F69 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI +1FA2; 1FA2; 1FAA; 1F6A 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI +1FA3; 1FA3; 1FAB; 1F6B 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI +1FA4; 1FA4; 1FAC; 1F6C 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI +1FA5; 1FA5; 1FAD; 1F6D 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI +1FA6; 1FA6; 1FAE; 1F6E 0399; # GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI +1FA7; 1FA7; 1FAF; 1F6F 0399; # GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI +1FA8; 1FA0; 1FA8; 1F68 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI +1FA9; 1FA1; 1FA9; 1F69 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI +1FAA; 1FA2; 1FAA; 1F6A 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI +1FAB; 1FA3; 1FAB; 1F6B 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI +1FAC; 1FA4; 1FAC; 1F6C 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI +1FAD; 1FA5; 1FAD; 1F6D 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI +1FAE; 1FA6; 1FAE; 1F6E 0399; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI +1FAF; 1FA7; 1FAF; 1F6F 0399; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI +1FB3; 1FB3; 1FBC; 0391 0399; # GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI +1FBC; 1FB3; 1FBC; 0391 0399; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI +1FC3; 1FC3; 1FCC; 0397 0399; # GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI +1FCC; 1FC3; 1FCC; 0397 0399; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI +1FF3; 1FF3; 1FFC; 03A9 0399; # GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI +1FFC; 1FF3; 1FFC; 03A9 0399; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI + +# Some characters with YPOGEGRAMMENI also have no corresponding titlecases + +1FB2; 1FB2; 1FBA 0345; 1FBA 0399; # GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI +1FB4; 1FB4; 0386 0345; 0386 0399; # GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI +1FC2; 1FC2; 1FCA 0345; 1FCA 0399; # GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI +1FC4; 1FC4; 0389 0345; 0389 0399; # GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI +1FF2; 1FF2; 1FFA 0345; 1FFA 0399; # GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI +1FF4; 1FF4; 038F 0345; 038F 0399; # GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI + +1FB7; 1FB7; 0391 0342 0345; 0391 0342 0399; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI +1FC7; 1FC7; 0397 0342 0345; 0397 0342 0399; # GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI +1FF7; 1FF7; 03A9 0342 0345; 03A9 0342 0399; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI + +# ================================================================================ +# Conditional Mappings +# The remainder of this file provides conditional casing data used to produce +# full case mappings. +# ================================================================================ +# Language-Insensitive Mappings +# These are characters whose full case mappings do not depend on language, but do +# depend on context (which characters come before or after). For more information +# see the header of this file and the Unicode Standard. +# ================================================================================ + +# Special case for final form of sigma + +03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA + +# Note: the following cases for non-final are already in the UnicodeData file. + +# 03A3; 03C3; 03A3; 03A3; # GREEK CAPITAL LETTER SIGMA +# 03C3; 03C3; 03A3; 03A3; # GREEK SMALL LETTER SIGMA +# 03C2; 03C2; 03A3; 03A3; # GREEK SMALL LETTER FINAL SIGMA + +# Note: the following cases are not included, since they would case-fold in lowercasing + +# 03C3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK SMALL LETTER SIGMA +# 03C2; 03C3; 03A3; 03A3; Not_Final_Sigma; # GREEK SMALL LETTER FINAL SIGMA + +# ================================================================================ +# Language-Sensitive Mappings +# These are characters whose full case mappings depend on language and perhaps also +# context (which characters come before or after). For more information +# see the header of this file and the Unicode Standard. +# ================================================================================ + +# Lithuanian + +# Lithuanian retains the dot in a lowercase i when followed by accents. + +# Remove DOT ABOVE after "i" with upper or titlecase + +0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE + +# Introduce an explicit dot above when lowercasing capital I's and J's +# whenever there are more accents above. +# (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek) + +0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I +004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J +012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK +00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE +00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE +0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE + +# ================================================================================ + +# Turkish and Azeri + +# I and i-dotless; I-dot and i are case pairs in Turkish and Azeri +# The following rules handle those cases. + +0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE +0130; 0069; 0130; 0130; az; # LATIN CAPITAL LETTER I WITH DOT ABOVE + +# When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i. +# This matches the behavior of the canonically equivalent I-dot_above + +0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE +0307; ; 0307; 0307; az After_I; # COMBINING DOT ABOVE + +# When lowercasing, unless an I is before a dot_above, it turns into a dotless i. + +0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I +0049; 0131; 0049; 0049; az Not_Before_Dot; # LATIN CAPITAL LETTER I + +# When uppercasing, i turns into a dotted capital I + +0069; 0069; 0130; 0130; tr; # LATIN SMALL LETTER I +0069; 0069; 0130; 0130; az; # LATIN SMALL LETTER I + +# Note: the following case is already in the UnicodeData file. + +# 0131; 0131; 0049; 0049; tr; # LATIN SMALL LETTER DOTLESS I + +# EOF diff --git a/rpython/rlib/unicodedata/generate_unicodedb.py b/rpython/rlib/unicodedata/generate_unicodedb.py --- a/rpython/rlib/unicodedata/generate_unicodedb.py +++ b/rpython/rlib/unicodedata/generate_unicodedb.py @@ -264,6 +264,24 @@ for code, value in extra_numeric.iteritems(): table.clone_char(code).numeric = value + table.special_casing = {} + if 'special_casing' in files: + for line in files['special_casing']: + line = line[:-1].split('#', 1)[0] + if not line: + continue + data = line.split("; ") + if data[4]: + # We ignore all conditionals (since they depend on + # languages) except for one, which is hardcoded. See + # handle_capital_sigma in unicodeobject.py. + continue + c = int(data[0], 16) + lower = [int(char, 16) for char in data[1].split()] + title = [int(char, 16) for char in data[2].split()] + upper = [int(char, 16) for char in data[3].split()] + table.special_casing[c] = (lower, title, upper) + # Compute full decompositions. for code, char in table.enum_chars(): table.get_canonical_decomposition(code) @@ -703,6 +721,7 @@ writeDict(outfile, '_toupper', toupper, base_mod) writeDict(outfile, '_tolower', tolower, base_mod) writeDict(outfile, '_totitle', totitle, base_mod) + writeDict(outfile, '_special_casing', table.special_casing, base_mod) print >> outfile, ''' def toupper(code): try: @@ -730,6 +749,39 @@ return base_mod._totitle.get(code, code) else: return code + +def toupper_full(code): + try: + return _special_casing[code][2] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][2] + except KeyError: + pass + return [toupper(code)] + +def tolower_full(code): + try: + return _special_casing[code][0] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][0] + except KeyError: + pass + return [tolower(code)] + +def totitle_full(code): + try: + return _special_casing[code][1] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][1] + except KeyError: + pass + return [totitle(code)] ''' # Decomposition decomposition = {} @@ -854,6 +906,8 @@ name_aliases='NameAliases-%(version)s.txt', named_sequences = 'NamedSequences-%(version)s.txt', ) + if options.unidata_version > '5': + filenames['special_casing'] = 'SpecialCasing-%(version)s.txt' filenames = dict((name, filename % dict(version=options.unidata_version)) for (name, filename) in filenames.items()) files = dict((name, open(filename)) diff --git a/rpython/rlib/unicodedata/test/test_unicodedata.py b/rpython/rlib/unicodedata/test/test_unicodedata.py --- a/rpython/rlib/unicodedata/test/test_unicodedata.py +++ b/rpython/rlib/unicodedata/test/test_unicodedata.py @@ -118,3 +118,10 @@ for un, name in additions.iteritems(): assert unicodedb_6_0_0.name(un) == name assert unicodedb_6_0_0.isprintable(un) + + def test_special_casing(self): + assert unicodedb_6_0_0.tolower_full(ord('A')) == [ord('a')] + # The German es-zed is special--the normal mapping is to SS. + assert unicodedb_6_0_0.tolower_full(ord(u'\xdf')) == [0xdf] + assert unicodedb_6_0_0.toupper_full(ord(u'\xdf')) == map(ord, 'SS') + assert unicodedb_6_0_0.totitle_full(ord(u'\xdf')) == map(ord, 'Ss') diff --git a/rpython/rlib/unicodedata/unicodedb_3_2_0.py b/rpython/rlib/unicodedata/unicodedb_3_2_0.py --- a/rpython/rlib/unicodedata/unicodedb_3_2_0.py +++ b/rpython/rlib/unicodedata/unicodedb_3_2_0.py @@ -19403,6 +19403,114 @@ 66638: None, 66639: None, } +_special_casing = { +} + +_special_casing_corrected = { +223: None, +304: None, +329: None, +496: None, +912: None, +944: None, +1415: None, +7830: None, +7831: None, +7832: None, +7833: None, +7834: None, +8016: None, +8018: None, +8020: None, +8022: None, +8064: None, +8065: None, +8066: None, +8067: None, +8068: None, +8069: None, +8070: None, +8071: None, +8072: None, +8073: None, +8074: None, +8075: None, +8076: None, +8077: None, +8078: None, +8079: None, +8080: None, +8081: None, +8082: None, +8083: None, +8084: None, +8085: None, +8086: None, +8087: None, +8088: None, +8089: None, +8090: None, +8091: None, +8092: None, +8093: None, +8094: None, +8095: None, +8096: None, +8097: None, +8098: None, +8099: None, +8100: None, +8101: None, +8102: None, +8103: None, +8104: None, +8105: None, +8106: None, +8107: None, +8108: None, +8109: None, +8110: None, +8111: None, +8114: None, +8115: None, +8116: None, +8118: None, +8119: None, +8124: None, +8130: None, +8131: None, +8132: None, +8134: None, +8135: None, +8140: None, +8146: None, +8147: None, +8150: None, +8151: None, +8162: None, +8163: None, +8164: None, +8166: None, +8167: None, +8178: None, +8179: None, +8180: None, +8182: None, +8183: None, +8188: None, +64256: None, +64257: None, +64258: None, +64259: None, +64260: None, +64261: None, +64262: None, +64275: None, +64276: None, +64277: None, +64278: None, +64279: None, +} def toupper(code): try: @@ -19431,6 +19539,39 @@ else: return code +def toupper_full(code): + try: + return _special_casing[code][2] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][2] + except KeyError: + pass + return [toupper(code)] + +def tolower_full(code): + try: + return _special_casing[code][0] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][0] + except KeyError: + pass + return [tolower(code)] + +def totitle_full(code): + try: + return _special_casing[code][1] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][1] + except KeyError: + pass + return [totitle(code)] + _raw_decomposition = { 194664: '2136A', 194676: '5F33', diff --git a/rpython/rlib/unicodedata/unicodedb_5_2_0.py b/rpython/rlib/unicodedata/unicodedb_5_2_0.py --- a/rpython/rlib/unicodedata/unicodedb_5_2_0.py +++ b/rpython/rlib/unicodedata/unicodedb_5_2_0.py @@ -142996,6 +142996,114 @@ _totitle_corrected = { } +_special_casing = { +223: ([223], [83, 115], [83, 83]), +304: ([105, 775], [304], [304]), +329: ([329], [700, 78], [700, 78]), +496: ([496], [74, 780], [74, 780]), +912: ([912], [921, 776, 769], [921, 776, 769]), +944: ([944], [933, 776, 769], [933, 776, 769]), +1415: ([1415], [1333, 1410], [1333, 1362]), +7830: ([7830], [72, 817], [72, 817]), +7831: ([7831], [84, 776], [84, 776]), +7832: ([7832], [87, 778], [87, 778]), +7833: ([7833], [89, 778], [89, 778]), +7834: ([7834], [65, 702], [65, 702]), +8016: ([8016], [933, 787], [933, 787]), +8018: ([8018], [933, 787, 768], [933, 787, 768]), +8020: ([8020], [933, 787, 769], [933, 787, 769]), +8022: ([8022], [933, 787, 834], [933, 787, 834]), +8064: ([8064], [8072], [7944, 921]), +8065: ([8065], [8073], [7945, 921]), +8066: ([8066], [8074], [7946, 921]), +8067: ([8067], [8075], [7947, 921]), +8068: ([8068], [8076], [7948, 921]), +8069: ([8069], [8077], [7949, 921]), +8070: ([8070], [8078], [7950, 921]), +8071: ([8071], [8079], [7951, 921]), +8072: ([8064], [8072], [7944, 921]), +8073: ([8065], [8073], [7945, 921]), +8074: ([8066], [8074], [7946, 921]), +8075: ([8067], [8075], [7947, 921]), +8076: ([8068], [8076], [7948, 921]), +8077: ([8069], [8077], [7949, 921]), +8078: ([8070], [8078], [7950, 921]), +8079: ([8071], [8079], [7951, 921]), +8080: ([8080], [8088], [7976, 921]), +8081: ([8081], [8089], [7977, 921]), +8082: ([8082], [8090], [7978, 921]), +8083: ([8083], [8091], [7979, 921]), +8084: ([8084], [8092], [7980, 921]), +8085: ([8085], [8093], [7981, 921]), +8086: ([8086], [8094], [7982, 921]), +8087: ([8087], [8095], [7983, 921]), +8088: ([8080], [8088], [7976, 921]), +8089: ([8081], [8089], [7977, 921]), +8090: ([8082], [8090], [7978, 921]), +8091: ([8083], [8091], [7979, 921]), +8092: ([8084], [8092], [7980, 921]), +8093: ([8085], [8093], [7981, 921]), +8094: ([8086], [8094], [7982, 921]), +8095: ([8087], [8095], [7983, 921]), +8096: ([8096], [8104], [8040, 921]), +8097: ([8097], [8105], [8041, 921]), +8098: ([8098], [8106], [8042, 921]), +8099: ([8099], [8107], [8043, 921]), +8100: ([8100], [8108], [8044, 921]), +8101: ([8101], [8109], [8045, 921]), +8102: ([8102], [8110], [8046, 921]), +8103: ([8103], [8111], [8047, 921]), +8104: ([8096], [8104], [8040, 921]), +8105: ([8097], [8105], [8041, 921]), +8106: ([8098], [8106], [8042, 921]), +8107: ([8099], [8107], [8043, 921]), +8108: ([8100], [8108], [8044, 921]), +8109: ([8101], [8109], [8045, 921]), +8110: ([8102], [8110], [8046, 921]), +8111: ([8103], [8111], [8047, 921]), +8114: ([8114], [8122, 837], [8122, 921]), +8115: ([8115], [8124], [913, 921]), +8116: ([8116], [902, 837], [902, 921]), +8118: ([8118], [913, 834], [913, 834]), +8119: ([8119], [913, 834, 837], [913, 834, 921]), +8124: ([8115], [8124], [913, 921]), +8130: ([8130], [8138, 837], [8138, 921]), +8131: ([8131], [8140], [919, 921]), +8132: ([8132], [905, 837], [905, 921]), +8134: ([8134], [919, 834], [919, 834]), +8135: ([8135], [919, 834, 837], [919, 834, 921]), +8140: ([8131], [8140], [919, 921]), +8146: ([8146], [921, 776, 768], [921, 776, 768]), +8147: ([8147], [921, 776, 769], [921, 776, 769]), +8150: ([8150], [921, 834], [921, 834]), +8151: ([8151], [921, 776, 834], [921, 776, 834]), +8162: ([8162], [933, 776, 768], [933, 776, 768]), +8163: ([8163], [933, 776, 769], [933, 776, 769]), +8164: ([8164], [929, 787], [929, 787]), +8166: ([8166], [933, 834], [933, 834]), +8167: ([8167], [933, 776, 834], [933, 776, 834]), +8178: ([8178], [8186, 837], [8186, 921]), +8179: ([8179], [8188], [937, 921]), +8180: ([8180], [911, 837], [911, 921]), +8182: ([8182], [937, 834], [937, 834]), +8183: ([8183], [937, 834, 837], [937, 834, 921]), +8188: ([8179], [8188], [937, 921]), +64256: ([64256], [70, 102], [70, 70]), +64257: ([64257], [70, 105], [70, 73]), +64258: ([64258], [70, 108], [70, 76]), +64259: ([64259], [70, 102, 105], [70, 70, 73]), +64260: ([64260], [70, 102, 108], [70, 70, 76]), +64261: ([64261], [83, 116], [83, 84]), +64262: ([64262], [83, 116], [83, 84]), +64275: ([64275], [1348, 1398], [1348, 1350]), +64276: ([64276], [1348, 1381], [1348, 1333]), +64277: ([64277], [1348, 1387], [1348, 1339]), +64278: ([64278], [1358, 1398], [1358, 1350]), +64279: ([64279], [1348, 1389], [1348, 1341]), +} + +_special_casing_corrected = { +} def toupper(code): try: @@ -143024,6 +143132,39 @@ else: return code +def toupper_full(code): + try: + return _special_casing[code][2] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][2] + except KeyError: + pass + return [toupper(code)] + +def tolower_full(code): + try: + return _special_casing[code][0] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][0] + except KeyError: + pass + return [tolower(code)] + +def totitle_full(code): + try: + return _special_casing[code][1] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][1] + except KeyError: + pass + return [totitle(code)] + _raw_decomposition = { 160: '<noBreak> 0020', 168: '<compat> 0020 0308', diff --git a/rpython/rlib/unicodedata/unicodedb_6_0_0.py b/rpython/rlib/unicodedata/unicodedb_6_0_0.py --- a/rpython/rlib/unicodedata/unicodedb_6_0_0.py +++ b/rpython/rlib/unicodedata/unicodedb_6_0_0.py @@ -5710,6 +5710,11 @@ _totitle_corrected = { } +_special_casing = { +} + +_special_casing_corrected = { +} def toupper(code): try: @@ -5738,6 +5743,39 @@ else: return code +def toupper_full(code): + try: + return _special_casing[code][2] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][2] + except KeyError: + pass + return [toupper(code)] + +def tolower_full(code): + try: + return _special_casing[code][0] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][0] + except KeyError: + pass + return [tolower(code)] + +def totitle_full(code): + try: + return _special_casing[code][1] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][1] + except KeyError: + pass + return [totitle(code)] + _raw_decomposition = { 8341: '<sub> 0068', 8342: '<sub> 006B', diff --git a/rpython/rlib/unicodedata/unicodedb_6_2_0.py b/rpython/rlib/unicodedata/unicodedb_6_2_0.py --- a/rpython/rlib/unicodedata/unicodedb_6_2_0.py +++ b/rpython/rlib/unicodedata/unicodedb_6_2_0.py @@ -8245,6 +8245,11 @@ _totitle_corrected = { } +_special_casing = { +} + +_special_casing_corrected = { +} def toupper(code): try: @@ -8273,6 +8278,39 @@ else: return code +def toupper_full(code): + try: + return _special_casing[code][2] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][2] + except KeyError: + pass + return [toupper(code)] + +def tolower_full(code): + try: + return _special_casing[code][0] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][0] + except KeyError: + pass + return [tolower(code)] + +def totitle_full(code): + try: + return _special_casing[code][1] + except KeyError: + if base_mod is not None and code not in _special_casing_corrected: + try: + return base_mod._special_casing[code][1] + except KeyError: + pass + return [totitle(code)] + _raw_decomposition = { 8341: '<sub> 0068', 8342: '<sub> 006B',