From pypy.commits at gmail.com Mon Aug 1 04:47:01 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 01:47:01 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: merge heads
Message-ID: <579f0c85.56421c0a.23487.5da5@mx.google.com>
Author: Richard Plangger
Branch: extradoc
Changeset: r5659:15244739241e
Date: 2016-08-01 10:46 +0200
http://bitbucket.org/pypy/extradoc/changeset/15244739241e/
Log: merge heads
diff --git a/blog/draft/revdb.rst b/blog/draft/revdb.rst
--- a/blog/draft/revdb.rst
+++ b/blog/draft/revdb.rst
@@ -286,7 +286,8 @@
(19422)$ bcontinue
[searching 19325..19422]
- updating watchpoint value: $0.value => RuntimeError: '$0' refers to an object created later in time
+ updating watchpoint value: $0.value => RuntimeError:
+ '$0' refers to an object created later in time
Reverse-hit watchpoint 1: $0.value
File "/tmp/x.py", line 6 in :
import os
diff --git a/talk/ep2016/Makefile b/talk/ep2016/Makefile
new file mode 100644
--- /dev/null
+++ b/talk/ep2016/Makefile
@@ -0,0 +1,6 @@
+slides.pdf: slides.tex author.latex
+ pdflatex $<
+
+slides.tex: slides.rst
+ rst2beamer.py slides.rst > slides.tex
+ sed 's/\\date{}/\\input{author.latex}/' -i slides.tex || exit
diff --git a/talk/ep2016/author.latex b/talk/ep2016/author.latex
new file mode 100644
--- /dev/null
+++ b/talk/ep2016/author.latex
@@ -0,0 +1,7 @@
+\definecolor{rrblitbackground}{rgb}{0.4, 0.0, 0.0}
+
+\title[CFFI and PyPy]{CFFI and PyPy}
+\author[Armin Rigo]{Armin Rigo}
+
+\institute{EuroPython 2016}
+\date{July 2016}
diff --git a/talk/ep2016/slides.rst b/talk/ep2016/slides.rst
new file mode 100644
--- /dev/null
+++ b/talk/ep2016/slides.rst
@@ -0,0 +1,576 @@
+====================================================
+CFFI (and PyPy)
+====================================================
+
+.. raw:: latex
+
+ \catcode`\|=13
+ \def|{\hskip 1cm}
+
+ \let\foobarbaz=>
+ \catcode`\>=13
+ \def>{\foobarbaz\relax}
+
+
+
+CFFI
+====
+
+* created in 2012
+
+* successful project according to PyPI
+
+* 3.4 million downloads for January
+
+* 22.3 million in total, 25th place on `pypi-ranking.info`
+
+ - Django is 31st
+
+* some high-visibility projects have switched to it (Cryptography)
+
+
+PyPy
+====
+
+* success: harder to say for sure
+
+* more later
+
+
+CFFI
+====
+
+
+
+CFFI
+====
+
+* call C from Python
+
+* CFFI = C Foreign Function Interface
+
+* shares ideas from Cython, ctypes, LuaJIT's FFI, SWIG...
+
+
+CFFI demo
+=========
+
+::
+
+ | $ man getpwnam
+
+ |
+
+ | SYNOPSIS
+
+ | | #include
+
+ | | #include
+
+ | |
+
+ | | struct passwd *getpwnam(const char *);
+
+
+CFFI demo
+=========
+
+::
+
+ | .
+
+ | .
+
+ | .
+
+ | The passwd structure is defined in
+
+ | as follows:
+
+ |
+
+ | struct passwd {
+
+ | | char *pw_name; /* username */
+
+ | | char *pw_passwd; /* user password */
+
+ | | uid_t pw_uid; /* user ID */
+
+ | .
+
+ | .
+
+ | .
+
+
+CFFI demo
+=========
+
+::
+
+ | import cffi
+
+ | ffibuilder = cffi.FFI()
+
+ |
+
+ | ffibuilder.cdef("""
+
+ | | typedef int... uid_t;
+
+ | | struct passwd {
+
+ | | | uid_t pw_uid;
+
+ | | | ...;
+
+ | | };
+
+ | | struct passwd *getpwnam(const char *);
+
+ | """)
+
+
+CFFI demo
+=========
+
+::
+
+ | ffibuilder.set_source("_pwuid_cffi", """
+
+ | | #include
+
+ | | #include
+
+ | """)
+
+ |
+
+ | ffibuilder.compile()
+
+ |
+
+... and put that in pwuid_build.py
+
+
+CFFI demo
+=========
+
+::
+
+ | python pwuid_build.py
+
+ |
+
+creates _pwuid_cffi.so
+
+
+CFFI demo
+=========
+
+::
+
+ from _pwuid_cffi import lib
+
+ print lib.getpwnam("username").pw_uid
+
+* That's all folks
+
+
+CFFI demo
+=========
+
+::
+
+ from _pwuid_cffi import ffi, lib
+
+* ``lib`` gives access to all functions from the cdef
+
+ - like ``lib.getpwnam()``
+
+* ``ffi`` gives access to a few general helpers
+
+
+ffibuilder.cdef()
+=====================
+
+::
+
+ | ffibuilder.cdef("""
+
+ | | int foo1(int a, int b);
+
+ | |
+
+ | | typedef ... Window;
+
+ | | Window *make_window(int w, int h);
+
+ | | void hide_window(Window *);
+
+ | """)
+
+
+ffi.new()
+=========
+
+::
+
+ | >>> p = ffi.new("char[]", "Some string")
+
+ | >>> p
+
+ |
+
+ |
+
+ | >>> p[1]
+
+ | 'o'
+
+ |
+
+ | >>> q = lib.getpwnam(p)
+
+ | >>> q
+
+ |
+
+ |
+
+ | >>> q.pw_uid
+
+ | 500
+
+ffi.cast()
+==========
+
+::
+
+ | >>> q = lib.getpwnam("root")
+
+ | >>> q
+
+ |
+
+ |
+
+ | >>> ffi.cast("void *", q)
+
+ |
+
+ |
+
+ | >>> int(ffi.cast("intptr_t", q))
+
+ | 305419896
+
+ | >>> hex(_)
+
+ | 0x12345678
+
+
+ffi.string()
+============
+
+::
+
+ | >>> p
+
+ |
+
+ |
+
+ | >>> p.pw_uid
+
+ | 500
+
+ |
+
+ | >>> p.pw_name
+
+ |
+
+ |
+
+ | >>> ffi.string(p.pw_name)
+
+ | "username"
+
+
+ffi.new_handle()
+================
+
+::
+
+ | >>> x = X()
+
+ | >>> h1 = ffi.new_handle(x)
+
+ | >>> h1
+
+ | >
+
+ | >>> lib.save_away(h1)
+
+ |
+
+ | >>> h2 = lib.fish_again()
+
+ | >>> h2
+
+ |
+
+ |
+
+ | >>> ffi.from_handle(h2)
+
+ |
+
+
+CFFI
+====
+
+* supports more or less the whole C
+
+* there is more than this short introduction suggests
+
+
+CFFI
+====
+
+* in real life, you want to provide a Pythonic API to a C library
+
+* you write Python functions and classes implementing it
+
+* all CFFI objects like ```` are hidden inside
+
+
+CFFI
+====
+
+* other use cases:
+
+ - call C code that you write yourself, not a separate C library
+
+ - API versus ABI mode: can also run in a ctypes-like way if
+ you don't want to depend on any C compiler at all
+
+* support for "embedding" Python inside some other non-Python program
+
+ - now you really never need the CPython C API any more
+
+
+CFFI
+====
+
+* see the docs: http://cffi.readthedocs.org/
+
+
+
+PyPy
+====
+
+
+PyPy
+====
+
+* a Python interpreter
+
+* different from the standard, which is CPython
+
+* main goal of PyPy: speed
+
+
+PyPy
+====
+
+::
+
+ | $ pypy
+
+ | Python 2.7.10 (7e8df3df9641, Jun 28 2016)
+
+ | [PyPy 5.3.1 with GCC 6.1.1] on linux2
+
+ | Type "help", "copyright", "credits" or
+
+ | >>>> 2+3
+
+ | 5
+
+ | >>>>
+
+
+PyPy
+====
+
+* run ``pypy my_program.py`` instead of ``python my_program.py``
+
+* contains a JIT compiler
+
+
+PyPy: Garbage Collection
+========================
+
+* "**moving,** generational, incremental GC"
+
+* objects don't have reference counters
+
+* allocated in a "nursery"
+
+* when nursery full, surviving objects are moved out
+
+* usually works on nursery objects only (fast), but rarely also performs
+  a full GC
+
+
+PyPy: C extensions
+==================
+
+* PyPy works great for running Python
+
+* less great when there are CPython C extension modules involved
+
+* not directly possible: we have moving, non-reference-counted objects,
+ and the C code expects non-moving, reference-counted objects
+
+
+PyPy: C extensions
+==================
+
+* PyPy has still some support for them, called its ``cpyext`` module
+
+* emulate all objects for C extensions with a shadow, non-movable,
+ reference-counted object
+
+* ``cpyext`` is slow
+
+* it should "often" work even with large libraries
+ (e.g. ``numpy`` support is mostly there)
+
+
+PyPy: ad
+========
+
+* but, hey, if you need performance out of Python and don't rely
+ critically on C extension modules, then give PyPy a try
+
+ - typical area where it works well: web services
+
+
+CPython C API: the problem
+==========================
+
+* CPython comes with a C API
+
+* very large number of functions
+
+* assumes objects don't move
+
+* assumes a "reference counting" model
+
+
+CPython C API
+=============
+
+* actually, the API is some large subset of the functions inside
+ CPython itself
+
+
+CPython C API
+=============
+
+* easy to use from C
+
+* historically, part of the success of Python
+
+
+CPython C API
+=============
+
+* further successful tools build on top of that API:
+
+ - SWIG
+ - Cython
+ - and other binding generators
+ - now CFFI
+
+
+CFFI
+====
+
+* but CFFI is a bit different
+
+ - it does not expose any part of the CPython C API
+
+ - everything is done with a minimal API on the ``ffi`` object
+ which is closer to C
+
+ - ``ffi.cast()``, ``ffi.new()``, etc.
+
+ - that means it can be directly ported
+
+
+CFFI and PyPy
+=============
+
+* we have a PyPy version of CFFI
+
+* the demos I have given above work equally well on CPython or on PyPy
+
+* (supporting PyPy was part of the core motivation behind CFFI)
+
+
+CFFI: performance
+=================
+
+* in PyPy, JIT compiler speeds up calls, so it's very fast
+
+* in CPython, it doesn't occur, but it is still reasonable when
+ compared with alternatives
+
+* main issue is that we write more code in Python with CFFI,
+ which makes it slower on CPython---but not really on PyPy
+
+
+CFFI: summary
+=============
+
+* call C from Python
+
+* works natively on CPython and on PyPy
+
+ - and easy to port to other Python implementations
+
+* supports CPython 2.6, 2.7, 3.2 to 3.5, and
+ is integrated with PyPy
+
+
+CFFI
+====
+
+* independent of the particular details of the Python implementation
+
+ - using CFFI, you call C functions and manipulate C-pointer-like
+ objects directly from Python
+
+ - you do in Python all logic involving Python objects
+
+ - there are no (official) ways around this API to call the CPython C
+ API, and none are needed
+
+
+CFFI
+====
+
+* two reasons to switch to it ``:-)``
+
+ - easy and cool
+
+ - better supported on non-CPython implementations
+
+* http://cffi.readthedocs.org/
From pypy.commits at gmail.com Mon Aug 1 04:46:59 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 01:46:59 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: draft for jitlog blog post
Message-ID: <579f0c83.8411c20a.bcd4e.fdc1@mx.google.com>
Author: Richard Plangger
Branch: extradoc
Changeset: r5658:8bb00903feae
Date: 2016-08-01 10:19 +0200
http://bitbucket.org/pypy/extradoc/changeset/8bb00903feae/
Log: draft for jitlog blog post
diff --git a/blog/draft/new-jit-log.rst b/blog/draft/new-jit-log.rst
new file mode 100644
--- /dev/null
+++ b/blog/draft/new-jit-log.rst
@@ -0,0 +1,54 @@
+PyPy's Toolbox got a new Hammer 🔨
+=======
+
+Tools, tools, tools! It seems that PyPy cannot get enough of them!
+In the last winter sprint (Leysin) covered the current tool for observing interals of the JIT compiler (JitViewer). VMProf at that time already proved that it is a good tool for CPU profiles. We are happy to release a new version of VMProf incooperating a rewritten version of JitViewer.
+
+The old logging format, is a hard to maintain plain text logging facility. Frequent changes often broke internal tools most notably the JITViewer. A second bad taste is that the logging output of a long running program takes a lot of space.
+
+Our new binary format encodes data densly, makes use of some compression (gzip) and tries to remove repetition where possible. On top of that protocol supports versioning and can be extended easily. And! *durms* you do not need to install JitViewer yourself anymore. The whole system moved to vmprof.com and you can use it any time free of charge.
+
+Sounds great. But what can you do with it? Here are two examples useful for a PyPy user:
+
+PyPy crashed? Did you discover a bug?
+-------------------
+
+For some hard to find bugs it is often necessary to look at the compiled code. The old procedure often required to upload a plain text file which was hard to parse and to look through.
+
+The new way to share a crash report is to install vmprof and execute either of the two commands:
+
+```
+# this program does not crash, but has some weird behaviour
+$ pypy -m jitlog --web
+...
+PyPy Jitlog: http://vmprof.com/#/
+# this program segfaults
+$ pypy -m jitlog -o /tmp/log
+...
+
+$ pypy -m jitlog --upload /tmp/log
+PyPy Jitlog: http://vmprof.com/#/
+```
+
+Providing the link in the bug report enables PyPy developers to browse and identify potential issues.
+
+Speed issues
+------------
+
+VMProf is a great tool to find out hot spots that consume a lot of time in your program. As soon as you have identified code that runs slowly, you can switch to the jitlog and maybe pinpoint certain aspects that do not behave as expected. You will find not only the overview, but are also able to browse the generated code. If you cannot make sense of all that, you can just share the link with us and we can have a look at the compiled code.
+
+Future direction
+----------------
+
+We hope that the new release will help both PyPy developers and PyPy users resolve potential issues and easily point them out.
+
+Here are a few ideas what might come in the next few releases ().
+
+* Extend vmprof.com to be able to query vmprof/jitlog. Some times it is interesting to search for specific patterns the compiler produced. An example for vmprof: 'methods.callsites() > 5' and for the jitlog would be 'traces.contains('call_assembler').hasbridge('*my_func_name*')'
+
+* Combination of CPU profiles and the JITLOG (Sadly did not make it into the current release)
+
+* Extend the jitlog to capture the information of the optimization stage
+
+plan_rich and the PyPy team
+
From pypy.commits at gmail.com Mon Aug 1 08:46:21 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 05:46:21 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: impl. vec_float_xor (missing
change)
Message-ID: <579f449d.d4e01c0a.f1ec7.c83f@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r85954:7510e65f6e05
Date: 2016-08-01 14:45 +0200
http://bitbucket.org/pypy/pypy/changeset/7510e65f6e05/
Log: impl. vec_float_xor (missing change)
diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py
--- a/rpython/jit/backend/x86/vector_ext.py
+++ b/rpython/jit/backend/x86/vector_ext.py
@@ -274,6 +274,8 @@
def genop_vec_int_xor(self, op, arglocs, resloc):
self.mc.PXOR(resloc, arglocs[0])
+ genop_vec_float_xor = genop_vec_int_xor
+
genop_vec_float_arith = """
def genop_vec_float_{type}(self, op, arglocs, resloc):
loc0, loc1, itemsize_loc = arglocs
@@ -639,6 +641,7 @@
consider_vec_int_and = consider_vec_logic
consider_vec_int_or = consider_vec_logic
consider_vec_int_xor = consider_vec_logic
+ consider_vec_float_xor = consider_vec_logic
del consider_vec_logic
def consider_vec_pack_i(self, op):
From pypy.commits at gmail.com Mon Aug 1 10:23:28 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 07:23:28 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: correct the scaling for
vec_load/store
Message-ID: <579f5b60.56421c0a.23487.f41c@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r85955:84c8a9345e22
Date: 2016-08-01 16:22 +0200
http://bitbucket.org/pypy/pypy/changeset/84c8a9345e22/
Log: correct the scaling for vec_load/store
diff --git a/rpython/jit/backend/llsupport/vector_ext.py b/rpython/jit/backend/llsupport/vector_ext.py
--- a/rpython/jit/backend/llsupport/vector_ext.py
+++ b/rpython/jit/backend/llsupport/vector_ext.py
@@ -2,7 +2,7 @@
from rpython.jit.backend.llsupport.descr import (unpack_arraydescr,
unpack_fielddescr, unpack_interiorfielddescr, ArrayDescr)
from rpython.rlib.objectmodel import specialize, always_inline
-from rpython.jit.metainterp.history import (VECTOR, FLOAT, INT)
+from rpython.jit.metainterp.history import (VECTOR, FLOAT, INT, ConstInt)
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.metainterp.optimizeopt.schedule import (forwarded_vecinfo,
failnbail_transformation)
@@ -72,7 +72,7 @@
self.argument_restrictions = argument_restris
def check_operation(self, state, pack, op):
- pass
+ return None
def crop_vector(self, op, newsize, size):
return newsize, size
@@ -111,19 +111,25 @@
class LoadRestrict(OpRestrict):
def check_operation(self, state, pack, op):
opnum = op.getopnum()
+ descr = op.getdescr()
+ if not we_are_translated() and not isinstance(descr, ArrayDescr):
+ itemsize = descr.get_item_size_in_bytes()
+ ofs = 0
+ else:
+ itemsize, ofs, _ = unpack_arraydescr(op.getdescr())
+ args = [op.getarg(0), op.getarg(1), ConstInt(1), ConstInt(ofs)]
if rop.is_getarrayitem(opnum) or \
opnum in (rop.GETARRAYITEM_RAW_I, rop.GETARRAYITEM_RAW_F):
- descr = op.getdescr()
- if not we_are_translated() and not isinstance(descr, ArrayDescr):
- itemsize = descr.get_item_size_in_bytes()
- ofs = 0
- else:
- itemsize, ofs, _ = unpack_arraydescr(op.getdescr())
index_box = op.getarg(1)
- _, _, changed, emit = cpu_simplify_scale(state.cpu, index_box, itemsize, ofs)
+ scale, offset, changed, emit = cpu_simplify_scale(state.cpu, index_box, itemsize, ofs)
+ args[2] = ConstInt(scale)
+ args[3] = ConstInt(offset)
if emit:
state.oplist.append(changed)
- op.setarg(1, changed)
+ args[1] = changed
+
+ return args
+
def opcount_filling_vector_register(self, op, vec_reg_size):
assert rop.is_primitive_load(op.opnum)
@@ -136,18 +142,22 @@
def check_operation(self, state, pack, op):
opnum = op.getopnum()
+ descr = op.getdescr()
+ if not we_are_translated() and not isinstance(descr, ArrayDescr):
+ itemsize = descr.get_item_size_in_bytes()
+ ofs = 0
+ else:
+ itemsize, ofs, _ = unpack_arraydescr(op.getdescr())
+ args = [op.getarg(0), op.getarg(1), op.getarg(2), ConstInt(1), ConstInt(ofs)]
if opnum in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW):
- descr = op.getdescr()
- if not we_are_translated() and not isinstance(descr, ArrayDescr):
- itemsize = descr.get_item_size_in_bytes()
- basesize= 0
- else:
- itemsize, basesize, _ = unpack_arraydescr(op.getdescr())
index_box = op.getarg(1)
- _, _, changed, emit = cpu_simplify_scale(state.cpu, index_box, itemsize, basesize)
+ scale, offset, changed, emit = cpu_simplify_scale(state.cpu, index_box, itemsize, ofs)
+ args[3] = ConstInt(scale)
+ args[4] = ConstInt(offset)
if emit:
state.oplist.append(changed)
- op.setarg(1, changed)
+ args[1] = changed
+ return args
def must_crop_vector(self, op, index):
vecinfo = forwarded_vecinfo(op.getarg(index))
@@ -185,6 +195,7 @@
raise NotAVectorizeableLoop()
if curvecinfo.datatype != datatype:
raise NotAVectorizeableLoop()
+ return None
TR_ANY = TypeRestrict()
TR_ANY_FLOAT = TypeRestrict(FLOAT)
diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py
--- a/rpython/jit/backend/x86/vector_ext.py
+++ b/rpython/jit/backend/x86/vector_ext.py
@@ -150,26 +150,11 @@
not_implemented("reduce sum for %s not impl." % arg)
- # TODO remove
- #def _genop_vec_getarrayitem(self, op, arglocs, resloc):
- # # considers item scale (raw_load does not)
- # base_loc, ofs_loc, size_loc, ofs, integer_loc, aligned_loc = arglocs
- # scale = get_scale(size_loc.value)
- # src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale)
- # self._vec_load(resloc, src_addr, integer_loc.value,
- # size_loc.value, aligned_loc.value)
- #
- #genop_vec_getarrayitem_raw_i = _genop_vec_getarrayitem
- #genop_vec_getarrayitem_raw_f = _genop_vec_getarrayitem
- #
- #genop_vec_getarrayitem_gc_i = _genop_vec_getarrayitem
- #genop_vec_getarrayitem_gc_f = _genop_vec_getarrayitem
-
def _genop_vec_load(self, op, arglocs, resloc):
- base_loc, ofs_loc, size_loc, ofs, integer_loc, aligned_loc = arglocs
- src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0)
+ base_loc, ofs_loc, size_loc, scale, ofs, integer_loc = arglocs
+ src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale.value)
self._vec_load(resloc, src_addr, integer_loc.value,
- size_loc.value, aligned_loc.value)
+ size_loc.value, False)
genop_vec_load_i = _genop_vec_load
genop_vec_load_f = _genop_vec_load
@@ -187,23 +172,12 @@
elif itemsize == 8:
self.mc.MOVUPD(resloc, src_addr)
- # TODO remove
- #def _genop_discard_vec_setarrayitem(self, op, arglocs):
- # # considers item scale (raw_store does not)
- # base_loc, ofs_loc, value_loc, size_loc, baseofs, integer_loc, aligned_loc = arglocs
- # scale = get_scale(size_loc.value)
- # dest_loc = addr_add(base_loc, ofs_loc, baseofs.value, scale)
- # self._vec_store(dest_loc, value_loc, integer_loc.value,
- # size_loc.value, aligned_loc.value)
-
- #genop_discard_vec_setarrayitem_raw = _genop_discard_vec_setarrayitem
- #genop_discard_vec_setarrayitem_gc = _genop_discard_vec_setarrayitem
-
def genop_discard_vec_store(self, op, arglocs):
- base_loc, ofs_loc, value_loc, size_loc, baseofs, integer_loc, aligned_loc = arglocs
- dest_loc = addr_add(base_loc, ofs_loc, baseofs.value, 0)
+ base_loc, ofs_loc, value_loc, size_loc, scale,\
+ baseofs, integer_loc = arglocs
+ dest_loc = addr_add(base_loc, ofs_loc, baseofs.value, scale.value)
self._vec_store(dest_loc, value_loc, integer_loc.value,
- size_loc.value, aligned_loc.value)
+ size_loc.value, False)
@always_inline
def _vec_store(self, dest_loc, value_loc, integer, itemsize, aligned):
@@ -550,20 +524,17 @@
assert isinstance(descr, ArrayDescr)
assert not descr.is_array_of_pointers() and \
not descr.is_array_of_structs()
- itemsize, ofs, _ = unpack_arraydescr(descr)
+ itemsize, _, _ = unpack_arraydescr(descr)
integer = not (descr.is_array_of_floats() or descr.getconcrete_type() == FLOAT)
- aligned = False
args = op.getarglist()
+ scale = get_scale(op.getarg(2).getint())
+ ofs = op.getarg(3).getint()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
result_loc = self.force_allocate_reg(op)
- self.perform(op, [base_loc, ofs_loc, imm(itemsize), imm(ofs),
- imm(integer), imm(aligned)], result_loc)
+ self.perform(op, [base_loc, ofs_loc, imm(itemsize), imm(scale),
+ imm(ofs), imm(integer)], result_loc)
- #consider_vec_getarrayitem_raw_i = _consider_vec_getarrayitem
- #consider_vec_getarrayitem_raw_f = _consider_vec_getarrayitem
- #consider_vec_getarrayitem_gc_i = _consider_vec_getarrayitem
- #consider_vec_getarrayitem_gc_f = _consider_vec_getarrayitem
consider_vec_load_i = _consider_vec_load
consider_vec_load_f = _consider_vec_load
@@ -573,20 +544,18 @@
assert isinstance(descr, ArrayDescr)
assert not descr.is_array_of_pointers() and \
not descr.is_array_of_structs()
- itemsize, ofs, _ = unpack_arraydescr(descr)
+ itemsize, _, _ = unpack_arraydescr(descr)
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
value_loc = self.make_sure_var_in_reg(op.getarg(2), args)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ scale = get_scale(op.getarg(3).getint())
+ ofs = op.getarg(4).getint()
- integer = not (descr.is_array_of_floats() or descr.getconcrete_type() == FLOAT)
- aligned = False
- self.perform_discard(op, [base_loc, ofs_loc, value_loc,
- imm(itemsize), imm(ofs), imm(integer), imm(aligned)])
-
- #consider_vec_setarrayitem_raw = _consider_vec_setarrayitem
- #consider_vec_setarrayitem_gc = _consider_vec_setarrayitem
- #consider_vec_store = _consider_vec_setarrayitem
+ integer = not (descr.is_array_of_floats() or \
+ descr.getconcrete_type() == FLOAT)
+ self.perform_discard(op, [base_loc, ofs_loc, value_loc, imm(itemsize),
+ imm(scale), imm(ofs), imm(integer)])
def consider_vec_arith(self, op):
lhs = op.getarg(0)
diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py
--- a/rpython/jit/metainterp/optimizeopt/schedule.py
+++ b/rpython/jit/metainterp/optimizeopt/schedule.py
@@ -220,11 +220,17 @@
left = pack.leftmost()
oprestrict = state.cpu.vector_ext.get_operation_restriction(left)
if oprestrict is not None:
- oprestrict.check_operation(state, pack, left)
- args = left.getarglist_copy()
+ newargs = oprestrict.check_operation(state, pack, left)
+ if newargs:
+ args = newargs
+ else:
+ args = left.getarglist_copy()
+ else:
+ args = left.getarglist_copy()
prepare_arguments(state, oprestrict, pack, args)
vecop = VecOperation(left.vector, args, left,
pack.numops(), left.getdescr())
+
for i,node in enumerate(pack.operations):
op = node.getoperation()
if op.returns_void():
diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -1077,7 +1077,7 @@
'GETARRAYITEM_GC/2d/rfi',
'GETARRAYITEM_RAW/2d/fi',
'RAW_LOAD/2d/fi',
- 'VEC_LOAD/2d/fi',
+ 'VEC_LOAD/4d/fi',
'_RAW_LOAD_LAST',
'GETINTERIORFIELD_GC/2d/rfi',
@@ -1112,7 +1112,7 @@
'SETARRAYITEM_GC/3d/n',
'SETARRAYITEM_RAW/3d/n',
'RAW_STORE/3d/n',
- 'VEC_STORE/3d/n',
+ 'VEC_STORE/5d/n',
'_RAW_STORE_LAST',
'SETINTERIORFIELD_GC/3d/n',
'SETINTERIORFIELD_RAW/3d/n', # right now, only used by tests
From pypy.commits at gmail.com Mon Aug 1 10:44:02 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 07:44:02 -0700 (PDT)
Subject: [pypy-commit] pypy jitlog-exact-source-lines: use offset2lineno as
proposed by cfbolz
Message-ID: <579f6032.09afc20a.a7367.9390@mx.google.com>
Author: Richard Plangger
Branch: jitlog-exact-source-lines
Changeset: r85956:7467ad8d230f
Date: 2016-08-01 16:41 +0200
http://bitbucket.org/pypy/pypy/changeset/7467ad8d230f/
Log: use offset2lineno as proposed by cfbolz
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -42,6 +42,22 @@
from rpython.rlib import rvmprof
return rvmprof.get_unique_id(bytecode)
+ at jit.elidable
+def offset2lineno(bytecode, stopat):
+ # see dis.findlinestarts for an explanation. This function is copied from
+ # rpython/tool/error.py
+ # lnotab is a list of [byte inc, line inc, ...]
+ # even position denote byte increments, odd line increments...
+ tab = bytecode.co_lnotab
+ line = bytecode.co_firstlineno
+ addr = 0
+ for i in range(0, len(tab), 2):
+ addr = addr + ord(tab[i])
+ if addr > stopat:
+ break
+ line = line + ord(tab[i+1])
+ return line
+
@jl.returns(jl.MP_FILENAME, jl.MP_LINENO,
jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE)
def get_location(next_instr, is_being_profiled, bytecode):
@@ -53,7 +69,8 @@
name = bytecode.co_name
if not name:
name = ""
- return (bytecode.co_filename, bytecode.co_firstlineno,
+ line = offset2lineno(bytecode, next_instr)
+ return (bytecode.co_filename, line,
name, intmask(next_instr), opname)
def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode):
From pypy.commits at gmail.com Mon Aug 1 11:44:49 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 01 Aug 2016 08:44:49 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: some tweaks and comments
Message-ID: <579f6e71.17a61c0a.70991.1730@mx.google.com>
Author: Carl Friedrich Bolz
Branch: extradoc
Changeset: r5660:7c07538c059b
Date: 2016-08-01 17:44 +0200
http://bitbucket.org/pypy/extradoc/changeset/7c07538c059b/
Log: some tweaks and comments
diff --git a/blog/draft/new-jit-log.rst b/blog/draft/new-jit-log.rst
--- a/blog/draft/new-jit-log.rst
+++ b/blog/draft/new-jit-log.rst
@@ -1,12 +1,18 @@
PyPy's Toolbox got a new Hammer 🔨
=======
+.. : XXX the title is very generic
+
+.. : XXX I don't actually like the first paragraph, I think it should be more
+ to the point. eg that things happened at the Leysin sprint doesn't matter much.
+ I would also add links to all the existing tools
+
Tools, tools, tools! It seems that PyPy cannot get enough of them!
-In the last winter sprint (Leysin) covered the current tool for observing interals of the JIT compiler (JitViewer). VMProf at that time already proved that it is a good tool for CPU profiles. We are happy to release a new version of VMProf incooperating a rewritten version of JitViewer.
+In the last winter sprint (Leysin) we covered the current tool for observing internals of the JIT compiler (JitViewer). VMProf at that time already proved that it is a good tool for CPU profiles. We are happy to release a new version of VMProf incorporating a rewritten version of JitViewer.
-The old logging format, is a hard to maintain plain text logging facility. Frequent changes often broke internal tools most notably the JITViewer. A second bad taste is that the logging output of a long running program takes a lot of space.
+The old logging format was a hard to maintain plain text logging facility. Frequent changes often broke internal tools, most notably the JITViewer. Another problem was that the logging output of a long running program took a lot of disk space.
-Our new binary format encodes data densly, makes use of some compression (gzip) and tries to remove repetition where possible. On top of that protocol supports versioning and can be extended easily. And! *durms* you do not need to install JitViewer yourself anymore. The whole system moved to vmprof.com and you can use it any time free of charge.
+Our new binary format encodes data densely, makes use of some compression (gzip) and tries to remove repetition where possible. On top of that, the protocol supports versioning and can be extended easily. And *drumroll* you do not need to install JitViewer yourself anymore! The whole system moved to vmprof.com and you can use it any time.
Sounds great. But what can you do with it? Here are two examples useful for a PyPy user:
@@ -15,7 +21,7 @@
For some hard to find bugs it is often necessary to look at the compiled code. The old procedure often required to upload a plain text file which was hard to parse and to look through.
-The new way to share a crash report is to install vmprof and execute either of the two commands:
+The new way to share a crash report is to install the ``vmprof`` module from PyPI and execute either of the two commands:
```
# this program does not crash, but has some weird behaviour
@@ -42,13 +48,13 @@
We hope that the new release will help both PyPy developers and PyPy users resolve potential issues and easily point them out.
-Here are a few ideas what might come in the next few releases ().
+Here are a few ideas what might come in the next few releases:
+
+* Combination of CPU profiles and the JITLOG (Sadly did not make it into the current release)
* Extend vmprof.com to be able to query vmprof/jitlog. Some times it is interesting to search for specific patterns the compiler produced. An example for vmprof: 'methods.callsites() > 5' and for the jitlog would be 'traces.contains('call_assembler').hasbridge('*my_func_name*')'
-* Combination of CPU profiles and the JITLOG (Sadly did not make it into the current release)
-
* Extend the jitlog to capture the information of the optimization stage
-plan_rich and the PyPy team
+Richard Plangger (plan_rich) and the PyPy team
From pypy.commits at gmail.com Mon Aug 1 12:08:11 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 01 Aug 2016 09:08:11 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: a nicer get_printable_location
Message-ID: <579f73eb.45d11c0a.ea683.20ec@mx.google.com>
Author: Carl Friedrich Bolz
Branch: reverse-debugger
Changeset: r85958:fa384c882956
Date: 2016-07-14 10:44 +0200
http://bitbucket.org/pypy/pypy/changeset/fa384c882956/
Log: a nicer get_printable_location
diff --git a/rpython/jit/tl/tla/tla.py b/rpython/jit/tl/tla/tla.py
--- a/rpython/jit/tl/tla/tla.py
+++ b/rpython/jit/tl/tla/tla.py
@@ -60,19 +60,34 @@
# ____________________________________________________________
-CONST_INT = 1
-POP = 2
-ADD = 3
-RETURN = 4
-JUMP_IF = 5
-DUP = 6
-SUB = 7
-NEWSTR = 8
+OPNAMES = []
+HASARG = []
+
+def define_op(name, has_arg=False):
+ globals()[name] = len(OPNAMES)
+ OPNAMES.append(name)
+ HASARG.append(has_arg)
+
+define_op("CONST_INT", True)
+define_op("POP")
+define_op("ADD")
+define_op("RETURN")
+define_op("JUMP_IF", True)
+define_op("DUP")
+define_op("SUB")
+define_op("NEWSTR", True)
+
# ____________________________________________________________
def get_printable_location(pc, bytecode):
- return str(pc)
+ op = ord(bytecode[pc])
+ name = OPNAMES[op]
+ if HASARG[op]:
+ arg = str(ord(bytecode[pc + 1]))
+ else:
+ arg = ''
+ return "%s: %s %s" % (pc, name, arg)
jitdriver = JitDriver(greens=['pc', 'bytecode'],
reds=['self'],
From pypy.commits at gmail.com Mon Aug 1 12:08:09 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 01 Aug 2016 09:08:09 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: add --jit option to the tla
target
Message-ID: <579f73e9.85261c0a.3c27.1cac@mx.google.com>
Author: Carl Friedrich Bolz
Branch: reverse-debugger
Changeset: r85957:49e0ce99e325
Date: 2016-07-14 10:13 +0200
http://bitbucket.org/pypy/pypy/changeset/49e0ce99e325/
Log: add --jit option to the tla target
diff --git a/rpython/jit/tl/tla/targettla.py b/rpython/jit/tl/tla/targettla.py
--- a/rpython/jit/tl/tla/targettla.py
+++ b/rpython/jit/tl/tla/targettla.py
@@ -4,9 +4,16 @@
def entry_point(args):
- """Main entry point of the stand-alone executable:
- takes a list of strings and returns the exit code.
- """
+ for i in range(len(argv)):
+ if argv[i] == "--jit":
+ if len(argv) == i + 1:
+ print "missing argument after --jit"
+ return 2
+ jitarg = argv[i + 1]
+ del argv[i:i+2]
+ jit.set_user_param(jitdriver, jitarg)
+ break
+
if len(args) < 3:
print "Usage: %s filename x" % (args[0],)
return 2
@@ -26,7 +33,7 @@
return bytecode
def target(driver, args):
- return entry_point, None
+ return entry_point
# ____________________________________________________________
From pypy.commits at gmail.com Mon Aug 1 12:08:13 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 01 Aug 2016 09:08:13 -0700 (PDT)
Subject: [pypy-commit] pypy default: make int * string work too in RPython
Message-ID: <579f73ed.eeb8c20a.79cf4.1d3f@mx.google.com>
Author: Carl Friedrich Bolz
Branch:
Changeset: r85959:2246f93d2550
Date: 2016-08-01 18:07 +0200
http://bitbucket.org/pypy/pypy/changeset/2246f93d2550/
Log: make int * string work too in RPython
diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py
--- a/rpython/rtyper/rstr.py
+++ b/rpython/rtyper/rstr.py
@@ -591,7 +591,9 @@
class __extend__(pairtype(IntegerRepr, AbstractStringRepr)):
def rtype_mul((r_int, r_str), hop):
- return pair(r_str, r_int).rtype_mul(hop)
+ str_repr = r_str.repr
+ v_int, v_str = hop.inputargs(Signed, str_repr)
+ return hop.gendirectcall(r_str.ll.ll_str_mul, v_str, v_int)
rtype_inplace_mul = rtype_mul
diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py
--- a/rpython/rtyper/test/test_rstr.py
+++ b/rpython/rtyper/test/test_rstr.py
@@ -220,11 +220,12 @@
const = self.const
def fn(i, mul):
s = ["", "a", "aba"][i]
- return s * mul
+ return s * mul + mul * s
for i in xrange(3):
for m in [0, 1, 4]:
+ res1 = fn(i, m)
res = self.interpret(fn, [i, m])
- assert self.ll_to_string(res) == fn(i, m)
+ assert self.ll_to_string(res) == res1
def test_is_none(self):
const = self.const
From pypy.commits at gmail.com Mon Aug 1 12:11:10 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 01 Aug 2016 09:11:10 -0700 (PDT)
Subject: [pypy-commit] pypy default: add --jit option to the tla target
Message-ID: <579f749e.17a61c0a.70991.21b8@mx.google.com>
Author: Carl Friedrich Bolz
Branch:
Changeset: r85960:dc10853dfd3b
Date: 2016-07-14 10:13 +0200
http://bitbucket.org/pypy/pypy/changeset/dc10853dfd3b/
Log: add --jit option to the tla target
diff --git a/rpython/jit/tl/tla/targettla.py b/rpython/jit/tl/tla/targettla.py
--- a/rpython/jit/tl/tla/targettla.py
+++ b/rpython/jit/tl/tla/targettla.py
@@ -4,9 +4,16 @@
def entry_point(args):
- """Main entry point of the stand-alone executable:
- takes a list of strings and returns the exit code.
- """
+ for i in range(len(argv)):
+ if argv[i] == "--jit":
+ if len(argv) == i + 1:
+ print "missing argument after --jit"
+ return 2
+ jitarg = argv[i + 1]
+ del argv[i:i+2]
+ jit.set_user_param(jitdriver, jitarg)
+ break
+
if len(args) < 3:
print "Usage: %s filename x" % (args[0],)
return 2
@@ -26,7 +33,7 @@
return bytecode
def target(driver, args):
- return entry_point, None
+ return entry_point
# ____________________________________________________________
From pypy.commits at gmail.com Mon Aug 1 12:11:12 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 01 Aug 2016 09:11:12 -0700 (PDT)
Subject: [pypy-commit] pypy default: a nicer get_printable_location
Message-ID: <579f74a0.a717c20a.804d7.c497@mx.google.com>
Author: Carl Friedrich Bolz
Branch:
Changeset: r85961:e3e2a10c5153
Date: 2016-07-14 10:44 +0200
http://bitbucket.org/pypy/pypy/changeset/e3e2a10c5153/
Log: a nicer get_printable_location
diff --git a/rpython/jit/tl/tla/tla.py b/rpython/jit/tl/tla/tla.py
--- a/rpython/jit/tl/tla/tla.py
+++ b/rpython/jit/tl/tla/tla.py
@@ -60,19 +60,34 @@
# ____________________________________________________________
-CONST_INT = 1
-POP = 2
-ADD = 3
-RETURN = 4
-JUMP_IF = 5
-DUP = 6
-SUB = 7
-NEWSTR = 8
+OPNAMES = []
+HASARG = []
+
+def define_op(name, has_arg=False):
+ globals()[name] = len(OPNAMES)
+ OPNAMES.append(name)
+ HASARG.append(has_arg)
+
+define_op("CONST_INT", True)
+define_op("POP")
+define_op("ADD")
+define_op("RETURN")
+define_op("JUMP_IF", True)
+define_op("DUP")
+define_op("SUB")
+define_op("NEWSTR", True)
+
# ____________________________________________________________
def get_printable_location(pc, bytecode):
- return str(pc)
+ op = ord(bytecode[pc])
+ name = OPNAMES[op]
+ if HASARG[op]:
+ arg = str(ord(bytecode[pc + 1]))
+ else:
+ arg = ''
+ return "%s: %s %s" % (pc, name, arg)
jitdriver = JitDriver(greens=['pc', 'bytecode'],
reds=['self'],
From pypy.commits at gmail.com Mon Aug 1 13:05:03 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 10:05:03 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: impl. flush_vector_cc for x86
using PBLENDVB
Message-ID: <579f813f.68adc20a.8208b.c48b@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r85962:7673f44c3693
Date: 2016-08-01 19:04 +0200
http://bitbucket.org/pypy/pypy/changeset/7673f44c3693/
Log: impl. flush_vector_cc for x86 using PBLENDVB
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -108,9 +108,20 @@
single_neg_const = '\x00\x00\x00\x80\x00\x00\x00\x80\x00\x00\x00\x80\x00\x00\x00\x80'
zero_const = '\x00' * 16
#
+ two_64bit_ones = '\x01\x00\x00\x00\x00\x00\x00\x00' * 2
+ four_32bit_ones = '\x01\x00\x00\x00' * 4
+ eight_16bit_ones = '\x01\x00' * 8
+ sixteen_8bit_ones = '\x01' * 16
+
+
+
+
+
+ #
data = neg_const + abs_const + \
single_neg_const + single_abs_const + \
- zero_const
+ zero_const + sixteen_8bit_ones + eight_16bit_ones + \
+ four_32bit_ones + two_64bit_ones
datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, [])
float_constants = datablockwrapper.malloc_aligned(len(data), alignment=16)
datablockwrapper.done()
@@ -122,6 +133,7 @@
self.single_float_const_neg_addr = float_constants + 32
self.single_float_const_abs_addr = float_constants + 48
self.expand_byte_mask_addr = float_constants + 64
+ self.element_ones = [float_constants + 80 + 16*i for i in range(4)]
def set_extra_stack_depth(self, mc, value):
if self._is_asmgcc():
diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py
--- a/rpython/jit/backend/x86/rx86.py
+++ b/rpython/jit/backend/x86/rx86.py
@@ -793,6 +793,7 @@
PTEST_xx = xmminsn('\x66', rex_nw, '\x0F\x38\x17', register(1,8), register(2), '\xC0')
PBLENDW_xxi = xmminsn('\x66', rex_nw, '\x0F\x3A\x0E', register(1,8), register(2), '\xC0', immediate(3, 'b'))
+ PBLENDVB_xx = xmminsn('\x66', rex_nw, '\x0F\x38\x10', register(1,8), register(2), '\xC0')
CMPPD_xxi = xmminsn('\x66', rex_nw, '\x0F\xC2', register(1,8), register(2), '\xC0', immediate(3, 'b'))
CMPPS_xxi = xmminsn( rex_nw, '\x0F\xC2', register(1,8), register(2), '\xC0', immediate(3, 'b'))
diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py
--- a/rpython/jit/backend/x86/vector_ext.py
+++ b/rpython/jit/backend/x86/vector_ext.py
@@ -10,7 +10,7 @@
xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14,
X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, AddressLoc)
from rpython.jit.backend.llsupport.vector_ext import VectorExt
-from rpython.jit.backend.llsupport.regalloc import get_scale
+from rpython.jit.backend.llsupport.regalloc import get_scale, TempVar
from rpython.jit.metainterp.resoperation import (rop, ResOperation,
VectorOp, VectorGuardOp)
from rpython.rlib.objectmodel import we_are_translated, always_inline
@@ -33,6 +33,14 @@
raise NotImplementedError(msg)
# DUP END
+class TempVector(TempVar):
+ def __init__(self, type):
+ self.type = type
+ def is_vector(self):
+ return True
+ def __repr__(self):
+        return "<TempVector at %s>" % (id(self),)
+
class X86VectorExt(VectorExt):
def setup_once(self, asm):
if detect_feature.detect_sse4_1():
@@ -292,29 +300,50 @@
self.mc.XORPD(src, heap(self.float_const_neg_addr))
def genop_vec_float_eq(self, op, arglocs, resloc):
- _, rhsloc, sizeloc = arglocs
+ lhsloc, rhsloc, sizeloc = arglocs
size = sizeloc.value
if size == 4:
- self.mc.CMPPS_xxi(resloc.value, rhsloc.value, 0) # 0 means equal
+ self.mc.CMPPS_xxi(lhsloc.value, rhsloc.value, 0) # 0 means equal
else:
- self.mc.CMPPD_xxi(resloc.value, rhsloc.value, 0)
+ self.mc.CMPPD_xxi(lhsloc.value, rhsloc.value, 0)
+ self.flush_vec_cc(rx86.Conditions["E"], lhsloc, resloc, sizeloc.value)
+
+ def flush_vec_cc(self, rev_cond, lhsloc, resloc, size):
+ # After emitting an instruction that leaves a boolean result in
+ # a condition code (cc), call this. In the common case, result_loc
+ # will be set to SPP by the regalloc, which in this case means
+ # "propagate it between this operation and the next guard by keeping
+ # it in the cc". In the uncommon case, result_loc is another
+ # register, and we emit a load from the cc into this register.
+
+ if resloc is ebp:
+ self.guard_success_cc = condition
+ else:
+ assert lhsloc is xmm0
+ maskloc = X86_64_XMM_SCRATCH_REG
+ self.mc.MOVAPD(maskloc, heap(self.element_ones[get_scale(size)]))
+ self.mc.PXOR(resloc, resloc)
+ # note that xmm0 contains true false for each element by the last compare operation
+ self.mc.PBLENDVB_xx(resloc.value, maskloc.value)
def genop_vec_float_ne(self, op, arglocs, resloc):
- _, rhsloc, sizeloc = arglocs
+ lhsloc, rhsloc, sizeloc = arglocs
size = sizeloc.value
# b(100) == 1 << 2 means not equal
if size == 4:
- self.mc.CMPPS_xxi(resloc.value, rhsloc.value, 1 << 2)
+ self.mc.CMPPS_xxi(lhsloc.value, rhsloc.value, 1 << 2)
else:
- self.mc.CMPPD_xxi(resloc.value, rhsloc.value, 1 << 2)
+ self.mc.CMPPD_xxi(lhsloc.value, rhsloc.value, 1 << 2)
+ self.flush_vec_cc(rx86.Conditions("NE"), lhsloc, resloc, sizeloc.value)
def genop_vec_int_eq(self, op, arglocs, resloc):
- _, rhsloc, sizeloc = arglocs
+ lhsloc, rhsloc, sizeloc = arglocs
size = sizeloc.value
- self.mc.PCMPEQ(resloc, rhsloc, size)
+ self.mc.PCMPEQ(lhsloc, rhsloc, size)
+ self.flush_vec_cc(rx86.Conditions("E"), lhsloc, resloc, sizeloc.value)
def genop_vec_int_ne(self, op, arglocs, resloc):
- _, rhsloc, sizeloc = arglocs
+ lhsloc, rhsloc, sizeloc = arglocs
size = sizeloc.value
self.mc.PCMPEQ(resloc, rhsloc, size)
temp = X86_64_XMM_SCRATCH_REG
@@ -325,6 +354,7 @@
# 11 11 11 11
# ----------- pxor
# 00 11 00 00
+ self.flush_vec_cc(rx86.Conditions("NE"), lhsloc, resloc, sizeloc.value)
def genop_vec_int_signext(self, op, arglocs, resloc):
srcloc, sizeloc, tosizeloc = arglocs
@@ -599,9 +629,55 @@
lhs = op.getarg(0)
assert isinstance(lhs, VectorOp)
args = op.getarglist()
+ # we need to use xmm0
+ lhsloc = self.enforce_var_in_vector_reg(op.getarg(0), args, selected_reg=xmm0)
rhsloc = self.make_sure_var_in_reg(op.getarg(1), args)
- lhsloc = self.xrm.force_result_in_reg(op, op.getarg(0), args)
- self.perform(op, [lhsloc, rhsloc, imm(lhs.bytesize)], lhsloc)
+ resloc = self.force_allocate_vector_reg_or_cc(op)
+ self.perform(op, [lhsloc, rhsloc, imm(lhs.bytesize)], resloc)
+
+ def enforce_var_in_vector_reg(self, arg, forbidden_vars, selected_reg):
+ """ Enforce the allocation in a specific register. This can even be a forbidden
+ register. If it is forbidden, it will be moved to another register.
+ Use with caution, currently this is only used for the vectorization backend
+ instructions.
+ """
+ xrm = self.xrm
+ if selected_reg not in xrm.free_regs:
+ variable = None
+ candidate_to_spill = None
+ for var, reg in self.xrm.reg_bindings.items():
+ if reg is selected_reg:
+ variable = var
+ else:
+ if var not in forbidden_vars:
+ candidate_to_spill = var
+ # do we have a free register?
+ if len(xrm.free_regs) == 0:
+ # spill a non forbidden variable
+ self._spill_var(candidate_to_spill, forbidden_vars, None)
+ loc = xrm.free_regs.pop()
+ self.assembler.mov(selected_reg, loc)
+ reg = xrm.reg_bindings.get(arg, None)
+ if reg:
+ xrm.free_regs.append(reg)
+ self.assembler.mov(reg, selected_reg)
+ xrm.reg_bindings[arg] = selected_reg
+ xrm.reg_bindings[variable] = loc
+
+ return selected_reg
+ return self.make_sure_var_in_reg(arg, forbidden_vars, selected_reg=selected_reg)
+
+ def force_allocate_vector_reg_or_cc(self, var):
+ assert var.type == INT
+ if self.next_op_can_accept_cc(self.operations, self.rm.position):
+ # hack: return the ebp location to mean "lives in CC". This
+ # ebp will not actually be used, and the location will be freed
+ # after the next op as usual.
+ self.xrm.force_allocate_frame_reg(var)
+ return ebp
+ else:
+ # else, return a regular register (not ebp).
+ return self.xrm.force_allocate_reg(var)
consider_vec_float_ne = consider_vec_float_eq
consider_vec_int_eq = consider_vec_float_eq
From pypy.commits at gmail.com Mon Aug 1 13:11:20 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 10:11:20 -0700 (PDT)
Subject: [pypy-commit] pypy jitlog-exact-source-lines: deduplicate
offset2lineno, moved comment to rpython/tool/error.py (pypyjit)
Message-ID: <579f82b8.8411c20a.bcd4e.cf5b@mx.google.com>
Author: Richard Plangger
Branch: jitlog-exact-source-lines
Changeset: r85963:3654fa52664d
Date: 2016-08-01 19:09 +0200
http://bitbucket.org/pypy/pypy/changeset/3654fa52664d/
Log: deduplicate offset2lineno, moved comment to rpython/tool/error.py
(pypyjit)
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -42,26 +42,11 @@
from rpython.rlib import rvmprof
return rvmprof.get_unique_id(bytecode)
-@jit.elidable
-def offset2lineno(bytecode, stopat):
- # see dis.findlinestarts for an explanation. This function is copied from
- # rpython/tool/error.py
- # lnotab is a list of [byte inc, line inc, ...]
- # even position denote byte increments, odd line increments...
- tab = bytecode.co_lnotab
- line = bytecode.co_firstlineno
- addr = 0
- for i in range(0, len(tab), 2):
- addr = addr + ord(tab[i])
- if addr > stopat:
- break
- line = line + ord(tab[i+1])
- return line
-
@jl.returns(jl.MP_FILENAME, jl.MP_LINENO,
jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE)
def get_location(next_instr, is_being_profiled, bytecode):
from pypy.tool.stdlib_opcode import opcode_method_names
+ from pypy.tool.error import offset2lineno
bcindex = ord(bytecode.co_code[next_instr])
opname = ""
if 0 <= bcindex < len(opcode_method_names):
diff --git a/rpython/tool/error.py b/rpython/tool/error.py
--- a/rpython/tool/error.py
+++ b/rpython/tool/error.py
@@ -158,6 +158,8 @@
@jit.elidable
def offset2lineno(c, stopat):
+ # even position in lnotab denote byte increments, odd line increments.
+ # see dis.findlinestarts in the python std. library for more details
tab = c.co_lnotab
line = c.co_firstlineno
addr = 0
From pypy.commits at gmail.com Mon Aug 1 13:11:22 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 10:11:22 -0700 (PDT)
Subject: [pypy-commit] pypy jitlog-exact-source-lines: close branch
Message-ID: <579f82ba.4171c20a.640c9.cf32@mx.google.com>
Author: Richard Plangger
Branch: jitlog-exact-source-lines
Changeset: r85964:19af959d833f
Date: 2016-08-01 19:09 +0200
http://bitbucket.org/pypy/pypy/changeset/19af959d833f/
Log: close branch
From pypy.commits at gmail.com Mon Aug 1 13:11:24 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 10:11:24 -0700 (PDT)
Subject: [pypy-commit] pypy default: merged branch jitlog-exact-source-lines
Message-ID: <579f82bc.8cc51c0a.f2f11.31a3@mx.google.com>
Author: Richard Plangger
Branch:
Changeset: r85965:a5b71ba0fa6e
Date: 2016-08-01 19:09 +0200
http://bitbucket.org/pypy/pypy/changeset/a5b71ba0fa6e/
Log: merged branch jitlog-exact-source-lines
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -46,6 +46,7 @@
jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE)
def get_location(next_instr, is_being_profiled, bytecode):
from pypy.tool.stdlib_opcode import opcode_method_names
+ from pypy.tool.error import offset2lineno
bcindex = ord(bytecode.co_code[next_instr])
opname = ""
if 0 <= bcindex < len(opcode_method_names):
@@ -53,7 +54,8 @@
name = bytecode.co_name
if not name:
             name = "<unknown code name>"
- return (bytecode.co_filename, bytecode.co_firstlineno,
+ line = offset2lineno(bytecode, next_instr)
+ return (bytecode.co_filename, line,
name, intmask(next_instr), opname)
def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode):
diff --git a/rpython/tool/error.py b/rpython/tool/error.py
--- a/rpython/tool/error.py
+++ b/rpython/tool/error.py
@@ -158,6 +158,8 @@
@jit.elidable
def offset2lineno(c, stopat):
+ # even position in lnotab denote byte increments, odd line increments.
+ # see dis.findlinestarts in the python std. library for more details
tab = c.co_lnotab
line = c.co_firstlineno
addr = 0
From pypy.commits at gmail.com Mon Aug 1 13:11:26 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 10:11:26 -0700 (PDT)
Subject: [pypy-commit] pypy default: documented branch
Message-ID: <579f82be.c19d1c0a.a744c.2f5c@mx.google.com>
Author: Richard Plangger
Branch:
Changeset: r85966:2831d94d2fec
Date: 2016-08-01 19:10 +0200
http://bitbucket.org/pypy/pypy/changeset/2831d94d2fec/
Log: documented branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -105,3 +105,7 @@
.. branch: ep2016sprint
Trying harder to make hash(-1) return -2, like it does on CPython
+
+.. branch: jitlog-exact-source-lines
+
+Log exact line positions in debug merge points.
From pypy.commits at gmail.com Mon Aug 1 13:16:54 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Mon, 01 Aug 2016 10:16:54 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Fix switched None and iterable
object on stack in GET_AWAITABLE (maybe a better fix is possible)
Message-ID: <579f8406.2916c20a.16ae1.ddf2@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r85967:131f3d3c8682
Date: 2016-08-01 19:16 +0200
http://bitbucket.org/pypy/pypy/changeset/131f3d3c8682/
Log: Fix switched None and iterable object on stack in GET_AWAITABLE
(maybe a better fix is possible)
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1441,6 +1441,13 @@
self.settopvalue(w_iterator)
def GET_AWAITABLE(self, oparg, next_instr):
+ from pypy.objspace.std.noneobject import W_NoneObject
+ if isinstance(self.peekvalue(), W_NoneObject):
+ #switch NoneObject with iterable on stack
+ w_firstnone = self.popvalue()
+ w_i = self.popvalue()
+ self.pushvalue(w_firstnone)
+ self.pushvalue(w_i)
w_iterable = self.peekvalue()
w_iter = w_iterable._GetAwaitableIter(self.space)
self.settopvalue(w_iter)
From pypy.commits at gmail.com Mon Aug 1 14:45:58 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Mon, 01 Aug 2016 11:45:58 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Merge with py3k
Message-ID: <579f98e6.031dc20a.f2c35.f616@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r85968:2688a6d60ce4
Date: 2016-08-01 20:41 +0200
http://bitbucket.org/pypy/pypy/changeset/2688a6d60ce4/
Log: Merge with py3k
diff too long, truncating to 2000 out of 43939 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -26,3 +26,4 @@
40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3
+7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -85,10 +85,11 @@
pass
def splitline(line, re_word = re.compile(r'[^\s"]\S*|["]["]|["].*?[^\\]["]')):
+ import ast
result = []
for word in re_word.findall(line):
if word.startswith('"'):
- word = eval(word)
+ word = ast.literal_eval(word)
result.append(word)
return result
diff --git a/lib-python/2.7/test/test_hash.py b/lib-python/2.7/test/test_hash.py
--- a/lib-python/2.7/test/test_hash.py
+++ b/lib-python/2.7/test/test_hash.py
@@ -174,7 +174,7 @@
class StringlikeHashRandomizationTests(HashRandomizationTests):
if check_impl_detail(pypy=True):
- EMPTY_STRING_HASH = -1
+ EMPTY_STRING_HASH = -2
else:
EMPTY_STRING_HASH = 0
diff --git a/lib-python/3/test/test_unicode.py b/lib-python/3/test/test_unicode.py
--- a/lib-python/3/test/test_unicode.py
+++ b/lib-python/3/test/test_unicode.py
@@ -2604,7 +2604,8 @@
def test_getnewargs(self):
text = 'abc'
args = text.__getnewargs__()
- self.assertIsNot(args[0], text)
+ if support.check_impl_detail():
+ self.assertIsNot(args[0], text)
self.assertEqual(args[0], text)
self.assertEqual(len(args), 1)
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -418,7 +418,7 @@
RegrTest('test_threading.py', usemodules="thread", core=True),
RegrTest('test_threading_local.py', usemodules="thread", core=True),
RegrTest('test_threadsignals.py', usemodules="thread"),
- RegrTest('test_time.py', core=True, usemodules="struct"),
+ RegrTest('test_time.py', core=True, usemodules="struct thread _rawffi"),
RegrTest('test_timeit.py'),
RegrTest('test_timeout.py'),
RegrTest('test_tk.py'),
@@ -452,7 +452,7 @@
RegrTest('test_userstring.py', core=True),
RegrTest('test_uu.py'),
RegrTest('test_uuid.py'),
- RegrTest('test_venv.py'),
+ RegrTest('test_venv.py', usemodules="struct"),
RegrTest('test_wait3.py', usemodules="thread"),
RegrTest('test_wait4.py', usemodules="thread"),
RegrTest('test_warnings.py', core=True),
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -198,10 +198,13 @@
return tp._alignmentofinstances()
@builtinify
-def byref(cdata):
+def byref(cdata, offset=0):
# "pointer" is imported at the end of this module to avoid circular
# imports
- return pointer(cdata)
+ ptr = pointer(cdata)
+ if offset != 0:
+ ptr._buffer[0] += offset
+ return ptr
def cdata_from_address(self, address):
# fix the address: turn it into as unsigned, in case it's a negative number
diff --git a/lib_pypy/_pypy_winbase_build.py b/lib_pypy/_pypy_winbase_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_build.py
@@ -0,0 +1,91 @@
+# Note: uses the CFFI out-of-line ABI mode. We can't use the API
+# mode because ffi.compile() needs to run the compiler, which
+# needs 'subprocess', which needs 'msvcrt' and '_subprocess',
+# which depend on '_pypy_winbase_cffi' already.
+#
+# Note that if you need to regenerate _pypy_winbase_cffi and
+# can't use a preexisting PyPy to do that, then running this
+# file should work as long as 'subprocess' is not imported
+# by cffi. I had to hack in 'cffi._pycparser' to move an
+#'import subprocess' to the inside of a function. (Also,
+# CPython+CFFI should work as well.)
+#
+# This module supports both msvcrt.py and _subprocess.py.
+
+from cffi import FFI
+
+ffi = FFI()
+
+ffi.set_source("_pypy_winbase_cffi", None)
+
+# ---------- MSVCRT ----------
+
+ffi.cdef("""
+typedef unsigned short wint_t;
+
+int _open_osfhandle(intptr_t osfhandle, int flags);
+intptr_t _get_osfhandle(int fd);
+int _setmode(int fd, int mode);
+int _locking(int fd, int mode, long nbytes);
+
+int _kbhit(void);
+int _getch(void);
+wint_t _getwch(void);
+int _getche(void);
+wint_t _getwche(void);
+int _putch(int);
+wint_t _putwch(wchar_t);
+int _ungetch(int);
+wint_t _ungetwch(wint_t);
+""")
+
+# ---------- SUBPROCESS ----------
+
+ffi.cdef("""
+typedef struct {
+ DWORD cb;
+ char * lpReserved;
+ char * lpDesktop;
+ char * lpTitle;
+ DWORD dwX;
+ DWORD dwY;
+ DWORD dwXSize;
+ DWORD dwYSize;
+ DWORD dwXCountChars;
+ DWORD dwYCountChars;
+ DWORD dwFillAttribute;
+ DWORD dwFlags;
+ WORD wShowWindow;
+ WORD cbReserved2;
+ LPBYTE lpReserved2;
+ HANDLE hStdInput;
+ HANDLE hStdOutput;
+ HANDLE hStdError;
+} STARTUPINFO, *LPSTARTUPINFO;
+
+typedef struct {
+ HANDLE hProcess;
+ HANDLE hThread;
+ DWORD dwProcessId;
+ DWORD dwThreadId;
+} PROCESS_INFORMATION, *LPPROCESS_INFORMATION;
+
+DWORD WINAPI GetVersion(void);
+BOOL WINAPI CreatePipe(PHANDLE, PHANDLE, void *, DWORD);
+BOOL WINAPI CloseHandle(HANDLE);
+HANDLE WINAPI GetCurrentProcess(void);
+BOOL WINAPI DuplicateHandle(HANDLE, HANDLE, HANDLE, LPHANDLE,
+ DWORD, BOOL, DWORD);
+BOOL WINAPI CreateProcessA(char *, char *, void *,
+ void *, BOOL, DWORD, char *,
+ char *, LPSTARTUPINFO, LPPROCESS_INFORMATION);
+DWORD WINAPI WaitForSingleObject(HANDLE, DWORD);
+BOOL WINAPI GetExitCodeProcess(HANDLE, LPDWORD);
+BOOL WINAPI TerminateProcess(HANDLE, UINT);
+HANDLE WINAPI GetStdHandle(DWORD);
+""")
+
+# --------------------
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/_pypy_winbase_cffi.py b/lib_pypy/_pypy_winbase_cffi.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_cffi.py
@@ -0,0 +1,10 @@
+# auto-generated file
+import _cffi_backend
+
+ffi = _cffi_backend.FFI('_pypy_winbase_cffi',
+ _version = 0x2601,
+ _types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x50\x03\x00\x00\x13\x11\x00\x00\x53\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x4F\x03\x00\x00\x4E\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x42\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x52\x03\x00\x00\x04\x01\x00\x00\x00\x01',
+ _globals = (b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x4C\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x49\x23GetStdHandle',0,b'\x00\x00\x3F\x23GetVersion',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x3B\x23WaitForSingleObject',0,b'\x00\x00\x38\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x44\x23_getwch',0,b'\x00\x00\x44\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x46\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x41\x23_ungetwch',0),
+ _struct_unions = ((b'\x00\x00\x00\x4E\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x4F\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x42\x11wShowWindow',b'\x00\x00\x42\x11cbReserved2',b'\x00\x00\x51\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')),
+ _typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x4EPROCESS_INFORMATION',b'\x00\x00\x00\x4FSTARTUPINFO',b'\x00\x00\x00\x42wint_t'),
+)
diff --git a/lib_pypy/_winapi.py b/lib_pypy/_winapi.py
--- a/lib_pypy/_winapi.py
+++ b/lib_pypy/_winapi.py
@@ -10,152 +10,99 @@
# Declare external Win32 functions
-import ctypes
-
-_kernel32 = ctypes.WinDLL('kernel32')
-
-_CloseHandle = _kernel32.CloseHandle
-_CloseHandle.argtypes = [ctypes.c_int]
-_CloseHandle.restype = ctypes.c_int
-
-_CreatePipe = _kernel32.CreatePipe
-_CreatePipe.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
- ctypes.c_void_p, ctypes.c_int]
-_CreatePipe.restype = ctypes.c_int
-
-_GetCurrentProcess = _kernel32.GetCurrentProcess
-_GetCurrentProcess.argtypes = []
-_GetCurrentProcess.restype = ctypes.c_int
+from _pypy_winbase_cffi import ffi as _ffi
+_kernel32 = _ffi.dlopen('kernel32')
GetVersion = _kernel32.GetVersion
-GetVersion.argtypes = []
-GetVersion.restype = ctypes.c_int
-_DuplicateHandle = _kernel32.DuplicateHandle
-_DuplicateHandle.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,
- ctypes.POINTER(ctypes.c_int),
- ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_DuplicateHandle.restype = ctypes.c_int
-_WaitForSingleObject = _kernel32.WaitForSingleObject
-_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_uint]
-_WaitForSingleObject.restype = ctypes.c_int
+# Now the _subprocess module implementation
-_GetExitCodeProcess = _kernel32.GetExitCodeProcess
-_GetExitCodeProcess.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
-_GetExitCodeProcess.restype = ctypes.c_int
+def _WinError():
+ code, message = _ffi.getwinerror()
+ raise WindowsError(code, message)
-_TerminateProcess = _kernel32.TerminateProcess
-_TerminateProcess.argtypes = [ctypes.c_int, ctypes.c_int]
-_TerminateProcess.restype = ctypes.c_int
+_INVALID_HANDLE_VALUE = _ffi.cast("HANDLE", -1)
-_GetStdHandle = _kernel32.GetStdHandle
-_GetStdHandle.argtypes = [ctypes.c_int]
-_GetStdHandle.restype = ctypes.c_int
-
-_GetModuleFileNameW = _kernel32.GetModuleFileNameW
-_GetModuleFileNameW.argtypes = [ctypes.c_int, ctypes.c_wchar_p, ctypes.c_uint]
-_GetModuleFileNameW.restype = ctypes.c_int
-
-class _STARTUPINFO(ctypes.Structure):
- _fields_ = [('cb', ctypes.c_int),
- ('lpReserved', ctypes.c_void_p),
- ('lpDesktop', ctypes.c_char_p),
- ('lpTitle', ctypes.c_char_p),
- ('dwX', ctypes.c_int),
- ('dwY', ctypes.c_int),
- ('dwXSize', ctypes.c_int),
- ('dwYSize', ctypes.c_int),
- ('dwXCountChars', ctypes.c_int),
- ('dwYCountChars', ctypes.c_int),
- ("dwFillAttribute", ctypes.c_int),
- ("dwFlags", ctypes.c_int),
- ("wShowWindow", ctypes.c_short),
- ("cbReserved2", ctypes.c_short),
- ("lpReserved2", ctypes.c_void_p),
- ("hStdInput", ctypes.c_int),
- ("hStdOutput", ctypes.c_int),
- ("hStdError", ctypes.c_int)
- ]
-
-class _PROCESS_INFORMATION(ctypes.Structure):
- _fields_ = [("hProcess", ctypes.c_int),
- ("hThread", ctypes.c_int),
- ("dwProcessID", ctypes.c_int),
- ("dwThreadID", ctypes.c_int)]
-
-_CreateProcess = _kernel32.CreateProcessW
-_CreateProcess.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p, ctypes.c_void_p,
- ctypes.c_int, ctypes.c_int, ctypes.c_wchar_p, ctypes.c_wchar_p,
- ctypes.POINTER(_STARTUPINFO), ctypes.POINTER(_PROCESS_INFORMATION)]
-_CreateProcess.restype = ctypes.c_int
-
-del ctypes
-
-# Now the _winapi module implementation
-
-from ctypes import c_int as _c_int, byref as _byref, WinError as _WinError
-
-class _handle:
- def __init__(self, handle):
- self.handle = handle
+class _handle(object):
+ def __init__(self, c_handle):
+ # 'c_handle' is a cffi cdata of type HANDLE, which is basically 'void *'
+ self.c_handle = c_handle
+ if int(self) != -1:
+ self.c_handle = _ffi.gc(self.c_handle, _kernel32.CloseHandle)
def __int__(self):
- return self.handle
+ return int(_ffi.cast("intptr_t", self.c_handle))
- def __del__(self):
- if self.handle is not None:
- _CloseHandle(self.handle)
+ def __repr__(self):
+ return '<_subprocess.handle %d at 0x%x>' % (int(self), id(self))
def Detach(self):
- handle, self.handle = self.handle, None
- return handle
+ h = int(self)
+ if h != -1:
+ c_handle = self.c_handle
+ self.c_handle = _INVALID_HANDLE_VALUE
+ _ffi.gc(c_handle, None)
+ return h
def Close(self):
- if self.handle not in (-1, None):
- _CloseHandle(self.handle)
- self.handle = None
+ if int(self) != -1:
+ c_handle = self.c_handle
+ self.c_handle = _INVALID_HANDLE_VALUE
+ _ffi.gc(c_handle, None)
+ _kernel32.CloseHandle(c_handle)
def CreatePipe(attributes, size):
- read = _c_int()
- write = _c_int()
+ handles = _ffi.new("HANDLE[2]")
- res = _CreatePipe(_byref(read), _byref(write), None, size)
+ res = _kernel32.CreatePipe(handles, handles + 1, _ffi.NULL, size)
if not res:
raise _WinError()
- return _handle(read.value), _handle(write.value)
+ return _handle(handles[0]), _handle(handles[1])
def GetCurrentProcess():
- return _handle(_GetCurrentProcess())
+ return _handle(_kernel32.GetCurrentProcess())
def DuplicateHandle(source_process, source, target_process, access, inherit, options=0):
- target = _c_int()
+ # CPython: the first three arguments are expected to be integers
+ target = _ffi.new("HANDLE[1]")
- res = _DuplicateHandle(int(source_process), int(source), int(target_process),
- _byref(target),
- access, inherit, options)
+ res = _kernel32.DuplicateHandle(
+ _ffi.cast("HANDLE", source_process),
+ _ffi.cast("HANDLE", source),
+ _ffi.cast("HANDLE", target_process),
+ target, access, inherit, options)
if not res:
raise _WinError()
- return _handle(target.value)
+ return _handle(target[0])
+
+def _z(input):
+ if input is None:
+ return _ffi.NULL
+ if isinstance(input, basestring):
+ return str(input)
+ raise TypeError("string/unicode/None expected, got %r" % (
+ type(input).__name__,))
def CreateProcess(name, command_line, process_attr, thread_attr,
inherit, flags, env, start_dir, startup_info):
- si = _STARTUPINFO()
+ si = _ffi.new("STARTUPINFO *")
if startup_info is not None:
si.dwFlags = startup_info.dwFlags
si.wShowWindow = startup_info.wShowWindow
+ # CPython: these three handles are expected to be _handle objects
if startup_info.hStdInput:
- si.hStdInput = int(startup_info.hStdInput)
+ si.hStdInput = startup_info.hStdInput.c_handle
if startup_info.hStdOutput:
- si.hStdOutput = int(startup_info.hStdOutput)
+ si.hStdOutput = startup_info.hStdOutput.c_handle
if startup_info.hStdError:
- si.hStdError = int(startup_info.hStdError)
+ si.hStdError = startup_info.hStdError.c_handle
- pi = _PROCESS_INFORMATION()
+ pi = _ffi.new("PROCESS_INFORMATION *")
flags |= CREATE_UNICODE_ENVIRONMENT
if env is not None:
@@ -164,47 +111,55 @@
envbuf += "%s=%s\0" % (k, v)
envbuf += '\0'
else:
- envbuf = None
+ envbuf = _ffi.NULL
- res = _CreateProcess(name, command_line, None, None, inherit, flags, envbuf,
- start_dir, _byref(si), _byref(pi))
+ res = _kernel32.CreateProcessA(_z(name), _z(command_line), _ffi.NULL,
+ _ffi.NULL, inherit, flags, envbuf,
+ _z(start_dir), si, pi)
if not res:
raise _WinError()
- return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessID, pi.dwThreadID
+ return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessId, pi.dwThreadId
def WaitForSingleObject(handle, milliseconds):
- res = _WaitForSingleObject(int(handle), milliseconds)
-
+ # CPython: the first argument is expected to be an integer.
+ res = _kernel32.WaitForSingleObject(_ffi.cast("HANDLE", handle),
+ milliseconds)
if res < 0:
raise _WinError()
return res
def GetExitCodeProcess(handle):
- code = _c_int()
+ # CPython: the first argument is expected to be an integer.
+ code = _ffi.new("DWORD[1]")
- res = _GetExitCodeProcess(int(handle), _byref(code))
+ res = _kernel32.GetExitCodeProcess(_ffi.cast("HANDLE", handle), code)
if not res:
raise _WinError()
- return code.value
+ return code[0]
def TerminateProcess(handle, exitcode):
- res = _TerminateProcess(int(handle), exitcode)
+ # CPython: the first argument is expected to be an integer.
+ # The second argument is silently wrapped in a UINT.
+ res = _kernel32.TerminateProcess(_ffi.cast("HANDLE", handle),
+ _ffi.cast("UINT", exitcode))
if not res:
raise _WinError()
def GetStdHandle(stdhandle):
- res = _GetStdHandle(stdhandle)
+ stdhandle = _ffi.cast("DWORD", stdhandle)
+ res = _kernel32.GetStdHandle(stdhandle)
if not res:
return None
else:
- return res
+ # note: returns integer, not handle object
+ return int(_ffi.cast("intptr_t", res))
def CloseHandle(handle):
res = _CloseHandle(handle)
diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py
--- a/lib_pypy/cffi/_pycparser/__init__.py
+++ b/lib_pypy/cffi/_pycparser/__init__.py
@@ -10,7 +10,6 @@
__all__ = ['c_lexer', 'c_parser', 'c_ast']
__version__ = '2.14'
-from subprocess import Popen, PIPE
from .c_parser import CParser
@@ -28,6 +27,7 @@
When successful, returns the preprocessed file's contents.
Errors from cpp will be printed out.
"""
+ from subprocess import Popen, PIPE
path_list = [cpp_path]
if isinstance(cpp_args, list):
path_list += cpp_args
diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info
--- a/lib_pypy/greenlet.egg-info
+++ b/lib_pypy/greenlet.egg-info
@@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: greenlet
-Version: 0.4.9
+Version: 0.4.10
Summary: Lightweight in-process concurrent programming
Home-page: https://github.com/python-greenlet/greenlet
Author: Ralf Schmitt (for CPython), PyPy team
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -2,7 +2,7 @@
import __pypy__
import _continuation
-__version__ = "0.4.9"
+__version__ = "0.4.10"
# ____________________________________________________________
# Exceptions
diff --git a/lib_pypy/msvcrt.py b/lib_pypy/msvcrt.py
--- a/lib_pypy/msvcrt.py
+++ b/lib_pypy/msvcrt.py
@@ -7,26 +7,39 @@
# XXX incomplete: implemented only functions needed by subprocess.py
# PAC: 2010/08 added MS locking for Whoosh
-import ctypes
+# 07/2016: rewrote in CFFI
+
+import sys
+if sys.platform != 'win32':
+ raise ImportError("The 'msvcrt' module is only available on Windows")
+
+import _rawffi
+from _pypy_winbase_cffi import ffi as _ffi
+_lib = _ffi.dlopen(_rawffi.get_libc().name)
+
import errno
-from ctypes_support import standard_c_lib as _c
-from ctypes_support import get_errno
-
-try:
- open_osfhandle = _c._open_osfhandle
-except AttributeError: # we are not on windows
- raise ImportError
try: from __pypy__ import builtinify, validate_fd
except ImportError: builtinify = validate_fd = lambda f: f
-open_osfhandle.argtypes = [ctypes.c_int, ctypes.c_int]
-open_osfhandle.restype = ctypes.c_int
+def _ioerr():
+ e = _ffi.errno
+ raise IOError(e, errno.errorcode[e])
-_get_osfhandle = _c._get_osfhandle
-_get_osfhandle.argtypes = [ctypes.c_int]
-_get_osfhandle.restype = ctypes.c_int
+
+ at builtinify
+def open_osfhandle(fd, flags):
+ """"open_osfhandle(handle, flags) -> file descriptor
+
+ Create a C runtime file descriptor from the file handle handle. The
+ flags parameter should be a bitwise OR of os.O_APPEND, os.O_RDONLY,
+ and os.O_TEXT. The returned file descriptor may be used as a parameter
+ to os.fdopen() to create a file object."""
+ fd = _lib._open_osfhandle(fd, flags)
+ if fd == -1:
+ _ioerr()
+ return fd
@builtinify
def get_osfhandle(fd):
@@ -38,62 +51,74 @@
validate_fd(fd)
except OSError as e:
raise IOError(*e.args)
- return _get_osfhandle(fd)
+ result = _lib._get_osfhandle(fd)
+ if result == -1:
+ _ioerr()
+ return result
-setmode = _c._setmode
-setmode.argtypes = [ctypes.c_int, ctypes.c_int]
-setmode.restype = ctypes.c_int
+ at builtinify
+def setmode(fd, flags):
+ """setmode(fd, mode) -> Previous mode
+
+ Set the line-end translation mode for the file descriptor fd. To set
+ it to text mode, flags should be os.O_TEXT; for binary, it should be
+ os.O_BINARY."""
+ flags = _lib._setmode(fd, flags)
+ if flags == -1:
+ _ioerr()
+ return flags
LK_UNLCK, LK_LOCK, LK_NBLCK, LK_RLCK, LK_NBRLCK = range(5)
-_locking = _c._locking
-_locking.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_locking.restype = ctypes.c_int
-
@builtinify
def locking(fd, mode, nbytes):
- '''lock or unlock a number of bytes in a file.'''
- rv = _locking(fd, mode, nbytes)
+ """"locking(fd, mode, nbytes) -> None
+
+ Lock part of a file based on file descriptor fd from the C runtime.
+ Raises IOError on failure. The locked region of the file extends from
+ the current file position for nbytes bytes, and may continue beyond
+ the end of the file. mode must be one of the LK_* constants listed
+ below. Multiple regions in a file may be locked at the same time, but
+ may not overlap. Adjacent regions are not merged; they must be unlocked
+ individually."""
+ rv = _lib._locking(fd, mode, nbytes)
if rv != 0:
- e = get_errno()
- raise IOError(e, errno.errorcode[e])
+ _ioerr()
# Console I/O routines
-kbhit = _c._kbhit
-kbhit.argtypes = []
-kbhit.restype = ctypes.c_int
+kbhit = _lib._kbhit
-getch = _c._getch
-getch.argtypes = []
-getch.restype = ctypes.c_char
+ at builtinify
+def getch():
+ return chr(_lib._getch())
-getwch = _c._getwch
-getwch.argtypes = []
-getwch.restype = ctypes.c_wchar
+ at builtinify
+def getwch():
+ return unichr(_lib._getwch())
-getche = _c._getche
-getche.argtypes = []
-getche.restype = ctypes.c_char
+ at builtinify
+def getche():
+ return chr(_lib._getche())
-getwche = _c._getwche
-getwche.argtypes = []
-getwche.restype = ctypes.c_wchar
+ at builtinify
+def getwche():
+ return unichr(_lib._getwche())
-putch = _c._putch
-putch.argtypes = [ctypes.c_char]
-putch.restype = None
+ at builtinify
+def putch(ch):
+ _lib._putch(ord(ch))
-putwch = _c._putwch
-putwch.argtypes = [ctypes.c_wchar]
-putwch.restype = None
+ at builtinify
+def putwch(ch):
+ _lib._putwch(ord(ch))
-ungetch = _c._ungetch
-ungetch.argtypes = [ctypes.c_char]
-ungetch.restype = None
+ at builtinify
+def ungetch(ch):
+ if _lib._ungetch(ord(ch)) == -1: # EOF
+ _ioerr()
-ungetwch = _c._ungetwch
-ungetwch.argtypes = [ctypes.c_wchar]
-ungetwch.restype = None
-
-del ctypes
+ at builtinify
+def ungetwch(ch):
+ if _lib._ungetwch(ord(ch)) == -1: # EOF
+ _ioerr()
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -40,7 +40,7 @@
"binascii", "_multiprocessing", '_warnings', "_collections",
"_multibytecodec", "_continuation", "_cffi_backend",
"_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy"
- "faulthandler",
+ "faulthandler", "_jitlog",
])
from rpython.jit.backend import detect_cpu
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -94,6 +94,20 @@
def pytest_pycollect_makemodule(path, parent):
return PyPyModule(path, parent)
+def is_applevel(item):
+ from pypy.tool.pytest.apptest import AppTestFunction
+ return isinstance(item, AppTestFunction)
+
+def pytest_collection_modifyitems(config, items):
+ if config.option.runappdirect:
+ return
+ for item in items:
+ if isinstance(item, py.test.Function):
+ if is_applevel(item):
+ item.add_marker('applevel')
+ else:
+ item.add_marker('interplevel')
+
class PyPyModule(py.test.collect.Module):
""" we take care of collecting classes both at app level
and at interp-level (because we need to stick a space
@@ -128,9 +142,6 @@
if name.startswith('AppTest'):
from pypy.tool.pytest.apptest import AppClassCollector
return AppClassCollector(name, parent=self)
- else:
- from pypy.tool.pytest.inttest import IntClassCollector
- return IntClassCollector(name, parent=self)
elif hasattr(obj, 'func_code') and self.funcnamefilter(name):
if name.startswith('app_test_'):
@@ -138,11 +149,7 @@
"generator app level functions? you must be joking"
from pypy.tool.pytest.apptest import AppTestFunction
return AppTestFunction(name, parent=self)
- elif obj.func_code.co_flags & 32: # generator function
- return pytest.Generator(name, parent=self)
- else:
- from pypy.tool.pytest.inttest import IntTestFunction
- return IntTestFunction(name, parent=self)
+ return super(PyPyModule, self).makeitem(name, obj)
def skip_on_missing_buildoption(**ropts):
__tracebackhide__ = True
@@ -171,28 +178,19 @@
def pytest_runtest_setup(__multicall__, item):
if isinstance(item, py.test.collect.Function):
- appclass = item.getparent(PyPyClassCollector)
+ appclass = item.getparent(py.test.Class)
if appclass is not None:
# Make cls.space and cls.runappdirect available in tests.
spaceconfig = getattr(appclass.obj, 'spaceconfig', None)
if spaceconfig is not None:
from pypy.tool.pytest.objspace import gettestobjspace
appclass.obj.space = gettestobjspace(**spaceconfig)
+ else:
+ appclass.obj.space = LazyObjSpaceGetter()
appclass.obj.runappdirect = option.runappdirect
__multicall__.execute()
-class PyPyClassCollector(py.test.collect.Class):
- # All pypy Test classes have a "space" member.
- def setup(self):
- cls = self.obj
- if not hasattr(cls, 'spaceconfig'):
- cls.space = LazyObjSpaceGetter()
- else:
- assert hasattr(cls, 'space') # set by pytest_runtest_setup
- super(PyPyClassCollector, self).setup()
-
-
def pytest_ignore_collect(path):
return path.check(link=1)
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -104,27 +104,24 @@
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
- tk-dev libgc-dev liblzma-dev
-
-For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
+ tk-dev libgc-dev \
+ liblzma-dev # For lzma on PyPy3.
On Fedora::
dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
- gdbm-devel
-
-For the optional lzma module on PyPy3 you will also need ``xz-devel``.
+ gdbm-devel \
+ xz-devel # For lzma on PyPy3.
On SLES11::
zypper install gcc make python-devel pkg-config \
zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \
- libexpat-devel libffi-devel python-curses
+ libexpat-devel libffi-devel python-curses \
+ xz-devel # For lzma on PyPy3.
(XXX plus the SLES11 version of libgdbm-dev and tk-dev)
-For the optional lzma module on PyPy3 you will also need ``xz-devel``.
-
On Mac OS X, most of these build-time dependencies are installed alongside
the Developer Tools. However, note that in order for the installation to
find them you may need to run::
diff --git a/pypy/doc/config/commandline.txt b/pypy/doc/config/commandline.txt
--- a/pypy/doc/config/commandline.txt
+++ b/pypy/doc/config/commandline.txt
@@ -9,7 +9,7 @@
PyPy Python interpreter options
-------------------------------
-The following options can be used after ``translate.py
+The following options can be used after ``rpython
targetpypystandalone`` or as options to ``py.py``.
.. GENERATE: objspace
@@ -22,7 +22,7 @@
General translation options
---------------------------
-The following are options of ``translate.py``. They must be
+The following are options of ``bin/rpython``. They must be
given before the ``targetxxx`` on the command line.
* `--opt -O:`__ set the optimization level `[0, 1, size, mem, 2, 3]`
diff --git a/pypy/doc/config/index.rst b/pypy/doc/config/index.rst
--- a/pypy/doc/config/index.rst
+++ b/pypy/doc/config/index.rst
@@ -15,12 +15,12 @@
./py.py <`objspace options`_>
-and the ``translate.py`` translation entry
+and the ``rpython/bin/rpython`` translation entry
point which takes arguments of this form:
.. parsed-literal::
- ./translate.py <`translation options`_>
+ ./rpython/bin/rpython <`translation options`_>
For the common case of ```` being ``targetpypystandalone.py``,
you can then pass the `object space options`_ after
@@ -28,7 +28,7 @@
.. parsed-literal::
- ./translate.py <`translation options`_> targetpypystandalone.py <`objspace options`_>
+ ./rpython/bin/rpython <`translation options`_> targetpypystandalone.py <`objspace options`_>
There is an `overview`_ of all command line arguments that can be
passed in either position.
diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst
--- a/pypy/doc/config/opt.rst
+++ b/pypy/doc/config/opt.rst
@@ -4,8 +4,8 @@
This meta-option selects a default set of optimization
settings to use during a translation. Usage::
- translate.py --opt=#
- translate.py -O#
+ bin/rpython --opt=#
+ bin/rpython -O#
where ``#`` is the desired optimization level. The valid choices are:
diff --git a/pypy/doc/config/translation.dont_write_c_files.txt b/pypy/doc/config/translation.dont_write_c_files.txt
--- a/pypy/doc/config/translation.dont_write_c_files.txt
+++ b/pypy/doc/config/translation.dont_write_c_files.txt
@@ -1,4 +1,4 @@
write the generated C files to ``/dev/null`` instead of to the disk. Useful if
-you want to use translate.py as a benchmark and don't want to access the disk.
+you want to use translation as a benchmark and don't want to access the disk.
.. _`translation documentation`: ../translation.html
diff --git a/pypy/doc/config/translation.fork_before.txt b/pypy/doc/config/translation.fork_before.txt
--- a/pypy/doc/config/translation.fork_before.txt
+++ b/pypy/doc/config/translation.fork_before.txt
@@ -1,4 +1,4 @@
This is an option mostly useful when working on the PyPy toolchain. If you use
-it, translate.py will fork before the specified phase. If the translation
+it, translation will fork before the specified phase. If the translation
crashes after that fork, you can fix the bug in the toolchain, and continue
translation at the fork-point.
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -122,7 +122,7 @@
$ hg up reflex-support # optional
# This example shows python, but using pypy-c is faster and uses less memory
- $ python rpython/translator/goal/translate.py --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy
+ $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy
This will build a ``pypy-c`` that includes the cppyy module, and through that,
Reflex support.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -315,13 +315,28 @@
- ``complex``
+ - ``str`` (empty or single-character strings only)
+
+ - ``unicode`` (empty or single-character strings only)
+
+ - ``tuple`` (empty tuples only)
+
+ - ``frozenset`` (empty frozenset only)
+
This change requires some changes to ``id`` as well. ``id`` fulfills the
following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the
above types will return a value that is computed from the argument, and can
thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long).
-Notably missing from the list above are ``str`` and ``unicode``. If your
-code relies on comparing strings with ``is``, then it might break in PyPy.
+Note that strings of length 2 or greater can be equal without being
+identical. Similarly, ``x is (2,)`` is not necessarily true even if
+``x`` contains a tuple and ``x == (2,)``. The uniqueness rules apply
+only to the particular cases described above. The ``str``, ``unicode``,
+``tuple`` and ``frozenset`` rules were added in PyPy 5.4; before that, a
+test like ``if x is "?"`` or ``if x is ()`` could fail even if ``x`` was
+equal to ``"?"`` or ``()``. The new behavior added in PyPy 5.4 is
+closer to CPython's, which caches precisely the empty tuple/frozenset,
+and (generally but not always) the strings and unicodes of length <= 1.
Note that for floats there "``is``" only one object per "bit pattern"
of the float. So ``float('nan') is float('nan')`` is true on PyPy,
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -335,3 +335,60 @@
This will disable SELinux's protection and allow PyPy to configure correctly.
Be sure to enable it again if you need it!
+
+
+How should I report a bug?
+--------------------------
+
+Our bug tracker is here: https://bitbucket.org/pypy/pypy/issues/
+
+Missing features or incompatibilities with CPython are considered
+bugs, and they are welcome. (See also our list of `known
+incompatibilities`__.)
+
+.. __: http://pypy.org/compat.html
+
+For bugs of the kind "I'm getting a PyPy crash or a strange
+exception", please note that: **We can't do anything without
+reproducing the bug ourselves**. We cannot do anything with
+tracebacks from gdb, or core dumps. This is not only because the
+standard PyPy is compiled without debug symbols. The real reason is
+that a C-level traceback is usually of no help at all in PyPy.
+Debugging PyPy can be annoying.
+
+In more details:
+
+* First, please give the exact PyPy version, and the OS.
+
+* It might help focus our search if we know if the bug can be
+ reproduced on a "``pypy --jit off``" or not. If "``pypy --jit
+ off``" always works, then the problem might be in the JIT.
+ Otherwise, we know we can ignore that part.
+
+* If you got the bug using only Open Source components, please give a
+ step-by-step guide that we can follow to reproduce the problem
+ ourselves. Don't assume we know anything about any program other
+ than PyPy. We would like a guide that we can follow point by point
+ (without guessing or having to figure things out)
+ on a machine similar to yours, starting from a bare PyPy, until we
+ see the same problem. (If you can, you can try to reduce the number
+ of steps and the time it needs to run, but that is not mandatory.)
+
+* If the bug involves Closed Source components, or just too many Open
+ Source components to install them all ourselves, then maybe you can
+ give us some temporary ssh access to a machine where the bug can be
+ reproduced. Or, maybe we can download a VirtualBox or VMWare
+ virtual machine where the problem occurs.
+
+* If giving us access would require us to use tools other than ssh,
+  make appointments, or sign an NDA, then we can consider a commercial
+  support contract for a small sum of money.
+
+* If even that is not possible for you, then sorry, we can't help.
+
+Of course, you can try to debug the problem yourself, and we can help
+you get started if you ask on the #pypy IRC channel, but be prepared:
+debugging an annoying PyPy problem usually involves quite a lot of gdb
+in auto-generated C code, and at least some knowledge about the
+various components involved, from PyPy's own RPython source code to
+the GC and possibly the JIT.
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-pypy2.7-v5.3.1.rst
release-pypy2.7-v5.3.0.rst
release-5.1.1.rst
release-5.1.0.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
.. toctree::
whatsnew-head.rst
+ whatsnew-pypy2-5.3.1.rst
whatsnew-pypy2-5.3.0.rst
whatsnew-5.1.0.rst
whatsnew-5.0.0.rst
diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst
--- a/pypy/doc/install.rst
+++ b/pypy/doc/install.rst
@@ -39,17 +39,16 @@
library.
If you want to install 3rd party libraries, the most convenient way is
-to install pip_ (unless you want to install virtualenv as explained
-below; then you can directly use pip inside virtualenvs):
+to install pip_ using ensurepip_ (unless you want to install virtualenv as
+explained below; then you can directly use pip inside virtualenvs):
.. code-block:: console
- $ curl -O https://bootstrap.pypa.io/get-pip.py
- $ ./pypy-2.1/bin/pypy get-pip.py
- $ ./pypy-2.1/bin/pip install pygments # for example
+ $ ./pypy-xxx/bin/pypy -m ensurepip
+ $ ./pypy-xxx/bin/pip install pygments # for example
-Third party libraries will be installed in ``pypy-2.1/site-packages``, and
-the scripts in ``pypy-2.1/bin``.
+Third party libraries will be installed in ``pypy-xxx/site-packages``, and
+the scripts in ``pypy-xxx/bin``.
Installing using virtualenv
@@ -61,7 +60,7 @@
checkout::
# from a tarball
- $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env
+ $ virtualenv -p /opt/pypy-xxx/bin/pypy my-pypy-env
# from the mercurial checkout
$ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env
@@ -69,7 +68,7 @@
Note that bin/python is now a symlink to bin/pypy.
.. _pip: http://pypi.python.org/pypi/pip
-
+.. _ensurepip: https://docs.python.org/2.7/library/ensurepip.html
Building PyPy yourself
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pypy/doc/release-pypy2.7-v5.3.1.rst b/pypy/doc/release-pypy2.7-v5.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-pypy2.7-v5.3.1.rst
@@ -0,0 +1,41 @@
+==========
+PyPy 5.3.1
+==========
+
+We have released a bugfix for PyPy2.7-v5.3.0, released last week,
+due to issues_ reported by users.
+
+Thanks to those who reported the issues.
+
+.. _issues: http://doc.pypy.org/en/latest/whatsnew-pypy2-5.3.1.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,13 @@
.. this is a revision shortly after release-pypy2.7-v5.3
.. startrev: 873218a739f1
+.. 418b05f95db5
+Improve CPython compatibility for ``is``. Now code like ``if x is ():``
+works the same way as it does on CPython. See http://pypy.readthedocs.io/en/latest/cpython_differences.html#object-identity-of-primitive-values-is-and-id .
+
+.. pull request #455
+Add sys.{get,set}dlopenflags, for cpyext extensions.
+
.. branch: fix-gen-dfa
Resolves an issue with the generator script to build the dfa for Python syntax.
@@ -19,3 +26,82 @@
.. branch: s390x-5.3-catchup
Implement the backend related changes for s390x.
+
+.. branch: incminimark-ll_assert
+.. branch: vmprof-openbsd
+
+.. branch: testing-cleanup
+
+Simplify handling of interp-level tests and make it more forward-
+compatible.
+
+.. branch: pyfile-tell
+Sync w_file with the c-level FILE* before returning FILE* in PyFile_AsFile
+
+.. branch: rw-PyString_AS_STRING
+Allow rw access to the char* returned from PyString_AS_STRING, also refactor
+PyStringObject to look like cpython's and allow subclassing PyString_Type and
+PyUnicode_Type
+
+.. branch: save_socket_errno
+
+Bug fix: if ``socket.socket()`` failed, the ``socket.error`` did not show
+the errno of the failing system call, but instead some random previous
+errno.
+
+.. branch: PyTuple_Type-subclass
+
+Refactor PyTupleObject to look like cpython's and allow subclassing
+PyTuple_Type
+
+.. branch: call-via-pyobj
+
+Use offsets from PyTypeObject to find actual c function to call rather than
+fixed functions, allows function override after PyType_Ready is called
+
+.. branch: issue2335
+
+Avoid exhausting the stack in the JIT due to successive guard
+failures in the same Python function ending up as successive levels of
+RPython functions, while at app-level the traceback is very short
+
+.. branch: use-madv-free
+
+Try harder to return memory to the OS. See e.g. issue #2336. Note that it does
+not show up as a reduction of the VIRT column in ``top``, and the RES
+column might also not show the reduction, particularly on Linux >= 4.5 or
+on OS/X: it uses MADV_FREE, which only marks the pages as returnable to
+the OS if the memory is low.
+
+.. branch: cpyext-slotdefs2
+
+Fill in more slots when creating a PyTypeObject from a W_TypeObject
+More slots are still TBD, like tp_print and richcmp
+
+.. branch: json-surrogates
+
+Align the json module's decoding with CPython's implementation; fixes issue 2345
+
+.. branch: issue2343
+
+Copy CPython's logic more closely for handling of ``__instancecheck__()``
+and ``__subclasscheck__()``. Fixes issue 2343.
+
+.. branch: msvcrt-cffi
+
+Rewrite the Win32 dependencies of 'subprocess' to use cffi instead
+of ctypes. This avoids importing ctypes in many small programs and
+scripts, which in turn avoids enabling threads (because ctypes
+creates callbacks at import time, and callbacks need threads).
+
+.. branch: new-jit-log
+
+The new logging facility that integrates with and adds features to vmprof.com.
+
+.. branch: jitlog-32bit
+
+Resolve issues to use the new logging facility on a 32bit system
+
+.. branch: ep2016sprint
+
+Trying harder to make hash(-1) return -2, like it does on CPython
diff --git a/pypy/doc/whatsnew-pypy2-5.3.1.rst b/pypy/doc/whatsnew-pypy2-5.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-pypy2-5.3.1.rst
@@ -0,0 +1,15 @@
+===========================
+What's new in PyPy2.7 5.3.1
+===========================
+
+.. this is a revision shortly after release-pypy2.7-v5.3.0
+.. startrev: f4d726d1a010
+
+
+A bug-fix release, merging these changes:
+
+ * Add include guards to pymem.h, fixes issue #2321
+
+ * Make vmprof build on OpenBSD, from pull request #456
+
+ * Fix ``bytearray('').replace('a', 'ab')``, issue #2324
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -416,28 +416,12 @@
try_node.get_lineno(), try_node.get_column())
def handle_with_stmt(self, with_node, is_async):
- body = self.handle_suite(with_node.get_child(-1))
- i = with_node.num_children() - 1
- while True:
- i -= 2
- item = with_node.get_child(i)
- test = self.handle_expr(item.get_child(0))
- if item.num_children() == 3:
- target = self.handle_expr(item.get_child(2))
- self.set_context(target, ast.Store)
- else:
- target = None
if is_async:
wi = ast.AsyncWith(test, target, body, with_node.get_lineno(),
with_node.get_column())
else:
wi = ast.With(test, target, body, with_node.get_lineno(),
with_node.get_column())
- if i == 1:
- break
- body = [wi]
- return wi
-
def handle_with_item(self, item_node):
test = self.handle_expr(item_node.get_child(0))
if item_node.num_children() == 3:
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -1129,7 +1129,7 @@
assert space.eq_w(s.s, space.wrap("hi implicitly extra"))
s = self.get_first_expr("b'hi' b' implicitly' b' extra'")
assert isinstance(s, ast.Bytes)
- assert space.eq_w(s.s, space.wrapbytes("hi implicitly extra"))
+ assert space.eq_w(s.s, space.newbytes("hi implicitly extra"))
raises(SyntaxError, self.get_first_expr, "b'hello' 'world'")
sentence = u"Die Männer ärgen sich!"
source = u"# coding: utf-7\nstuff = '%s'" % (sentence,)
@@ -1184,7 +1184,7 @@
s = ast_from_node(space, tree, info).body[0].value
assert isinstance(s, ast.Str)
assert space.eq_w(s.s, space.wrap(u'Ç'))
-
+
def test_string_bug(self):
space = self.space
source = '# -*- encoding: utf8 -*-\nstuff = "x \xc3\xa9 \\n"\n'
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -965,7 +965,20 @@
"""
self.simple_test(source, 'ok', 1)
- def test_remove_docstring(self):
+ @py.test.mark.parametrize('expr, result', [
+ ("f1.__doc__", None),
+ ("f2.__doc__", 'docstring'),
+ ("f2()", 'docstring'),
+ ("f3.__doc__", None),
+ ("f3()", 'bar'),
+ ("C1.__doc__", None),
+ ("C2.__doc__", 'docstring'),
+ ("C3.field", 'not docstring'),
+ ("C4.field", 'docstring'),
+ ("C4.__doc__", 'docstring'),
+ ("C4.__doc__", 'docstring'),
+ ("__doc__", None),])
+ def test_remove_docstring(self, expr, result):
source = '"module_docstring"\n' + """if 1:
def f1():
'docstring'
@@ -989,19 +1002,7 @@
code_w.remove_docstrings(self.space)
dict_w = self.space.newdict();
code_w.exec_code(self.space, dict_w, dict_w)
-
- yield self.check, dict_w, "f1.__doc__", None
- yield self.check, dict_w, "f2.__doc__", 'docstring'
- yield self.check, dict_w, "f2()", 'docstring'
- yield self.check, dict_w, "f3.__doc__", None
- yield self.check, dict_w, "f3()", 'bar'
- yield self.check, dict_w, "C1.__doc__", None
- yield self.check, dict_w, "C2.__doc__", 'docstring'
- yield self.check, dict_w, "C3.field", 'not docstring'
- yield self.check, dict_w, "C4.field", 'docstring'
- yield self.check, dict_w, "C4.__doc__", 'docstring'
- yield self.check, dict_w, "C4.__doc__", 'docstring'
- yield self.check, dict_w, "__doc__", None
+ self.check(dict_w, expr, result)
def test_assert_skipping(self):
space = self.space
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -422,7 +422,7 @@
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
raise oefmt(space.w_TypeError,
- "AST string must be of type str or unicode")
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1056,7 +1056,7 @@
return (None, None)
def newlist_bytes(self, list_s):
- return self.newlist([self.wrapbytes(s) for s in list_s])
+ return self.newlist([self.newbytes(s) for s in list_s])
def newlist_unicode(self, list_u):
return self.newlist([self.wrap(u) for u in list_u])
@@ -1072,7 +1072,7 @@
return make_empty_list_with_size(self, sizehint)
def wrap_fsdecoded(self, x):
- return self.fsdecode(self.wrapbytes(x))
+ return self.fsdecode(self.newbytes(x))
@jit.unroll_safe
def exception_match(self, w_exc_type, w_check_class):
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -958,7 +958,7 @@
elif name != '__args__' and name != 'args_w':
spec = unwrap_spec[i]
if isinstance(defaultval, str) and spec not in [str]:
- defs_w.append(space.wrapbytes(defaultval))
+ defs_w.append(space.newbytes(defaultval))
else:
defs_w.append(space.wrap(defaultval))
if self._code._unwrap_spec:
@@ -978,7 +978,7 @@
if isinstance(spec, WrappedDefault):
default_value = spec.default_value
if isinstance(default_value, str):
- w_default = space.wrapbytes(default_value)
+ w_default = space.newbytes(default_value)
else:
w_default = space.wrap(default_value)
assert isinstance(w_default, W_Root)
diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py
--- a/pypy/interpreter/main.py
+++ b/pypy/interpreter/main.py
@@ -19,7 +19,7 @@
def compilecode(space, source, filename, cmd='exec'):
w = space.wrap
w_code = space.builtin.call(
- 'compile', space.wrapbytes(source), space.wrap_fsdecoded(filename),
+ 'compile', space.newbytes(source), space.wrap_fsdecoded(filename),
w(cmd), w(0), w(0))
pycode = space.interp_w(eval.Code, w_code)
return pycode
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -28,6 +28,7 @@
def unpack_str_tuple(space,w_str_tuple):
return [space.str_w(w_el) for w_el in space.unpackiterable(w_str_tuple)]
+
# Magic numbers for the bytecode version in code objects.
# See comments in pypy/module/imp/importing.
cpython_magic, = struct.unpack("' % (
name, unicode(self.getaddrstring(space)), fn,
-1 if self.co_firstlineno == 0 else self.co_firstlineno))
diff --git a/pypy/interpreter/pyparser/automata.py b/pypy/interpreter/pyparser/automata.py
--- a/pypy/interpreter/pyparser/automata.py
+++ b/pypy/interpreter/pyparser/automata.py
@@ -13,12 +13,11 @@
# PYPY Modification: removed the EMPTY class as it's not needed here
-# PYPY Modification: we don't need a particuliar DEFAULT class here
-# a simple None works fine.
-# (Having a DefaultClass inheriting from str makes
-# the annotator crash)
-DEFAULT = "\00default" # XXX hack, the rtyper does not support dict of with str|None keys
- # anyway using dicts doesn't seem the best final way to store these char indexed tables
+# PYPY Modification: DEFAULT is a singleton, used only in the pre-RPython
+# dicts (see pytokenize.py). Then DFA.__init__() turns these dicts into
+# more compact strings.
+DEFAULT = object()
+
# PYPY Modification : removed all automata functions (any, maybe,
# newArcPair, etc.)
diff --git a/pypy/interpreter/pyparser/error.py b/pypy/interpreter/pyparser/error.py
--- a/pypy/interpreter/pyparser/error.py
+++ b/pypy/interpreter/pyparser/error.py
@@ -25,7 +25,7 @@
'replace')
w_text = space.wrap(text)
if self.filename is not None:
- w_filename = space.fsdecode(space.wrapbytes(self.filename))
+ w_filename = space.fsdecode(space.newbytes(self.filename))
return space.newtuple([space.wrap(self.msg),
space.newtuple([w_filename,
space.wrap(self.lineno),
diff --git a/pypy/interpreter/pyparser/gendfa.py b/pypy/interpreter/pyparser/gendfa.py
--- a/pypy/interpreter/pyparser/gendfa.py
+++ b/pypy/interpreter/pyparser/gendfa.py
@@ -294,7 +294,7 @@
i = 0
for k, v in sorted(state.items()):
i += 1
- if k == '\x00default':
+ if k == DEFAULT:
k = "automata.DEFAULT"
else:
k = repr(k)
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py
--- a/pypy/interpreter/pyparser/parsestring.py
+++ b/pypy/interpreter/pyparser/parsestring.py
@@ -85,13 +85,13 @@
if rawmode or '\\' not in substr:
if not unicode_literal:
- return space.wrapbytes(substr)
+ return space.newbytes(substr)
else:
v = unicodehelper.decode_utf8(space, substr)
return space.wrap(v)
v = PyString_DecodeEscape(space, substr, 'strict', encoding)
- return space.wrapbytes(v)
+ return space.newbytes(v)
def decode_unicode_utf8(space, s, ps, q):
# ****The Python 2.7 version, producing UTF-32 escapes****
diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -7,7 +7,7 @@
def recode_to_utf8(space, bytes, encoding):
if encoding == 'utf-8':
return bytes
- w_text = space.call_method(space.wrapbytes(bytes), "decode",
+ w_text = space.call_method(space.newbytes(bytes), "decode",
space.wrap(encoding))
w_recoded = space.call_method(w_text, "encode", space.wrap("utf-8"))
return space.bytes_w(w_recoded)
diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py
--- a/pypy/interpreter/test/test_gateway.py
+++ b/pypy/interpreter/test/test_gateway.py
@@ -471,12 +471,12 @@
space = self.space
w = space.wrap
def f(filename):
- return space.wrapbytes(filename)
+ return space.newbytes(filename)
app_f = gateway.interp2app_temp(f, unwrap_spec=['fsencode'])
w_app_f = space.wrap(app_f)
assert space.eq_w(
space.call_function(w_app_f, w(u'\udc80')),
- space.wrapbytes('\x80'))
+ space.newbytes('\x80'))
def test_interp2app_unwrap_spec_typechecks(self):
from rpython.rlib.rarithmetic import r_longlong
@@ -801,7 +801,7 @@
w_g = space.wrap(gateway.interp2app_temp(g))
args = argument.Arguments(space, [])
w_res = space.call_args(w_g, args)
- assert space.eq_w(w_res, space.wrapbytes('foo'))
+ assert space.eq_w(w_res, space.newbytes('foo'))
class AppTestPyTestMark:
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -345,13 +345,13 @@
def interp_attrproperty_bytes(name, cls, doc=None):
"NOT_RPYTHON: initialization-time only"
def fget(space, obj):
- return space.wrapbytes(getattr(obj, name))
+ return space.newbytes(getattr(obj, name))
return GetSetProperty(fget, cls=cls, doc=doc)
def interp_attrproperty_fsdecode(name, cls, doc=None):
"NOT_RPYTHON: initialization-time only"
def fget(space, obj):
- return space.fsdecode(space.wrapbytes(getattr(obj, name)))
+ return space.fsdecode(space.newbytes(getattr(obj, name)))
return GetSetProperty(fget, cls=cls, doc=doc)
def interp_attrproperty_w(name, cls, doc=None):
diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -18,7 +18,7 @@
startingpos, endingpos):
raise OperationError(space.w_UnicodeDecodeError,
space.newtuple([space.wrap(encoding),
- space.wrapbytes(s),
+ space.newbytes(s),
space.wrap(startingpos),
space.wrap(endingpos),
space.wrap(msg)]))
@@ -111,7 +111,7 @@
return space.call_method(w_uni, 'encode',
getfilesystemencoding(space),
space.wrap('surrogateescape'))
- return space.wrapbytes(bytes)
+ return space.newbytes(bytes)
def encode(space, w_data, encoding=None, errors='strict'):
from pypy.objspace.std.unicodeobject import encode_object
@@ -141,9 +141,7 @@
return result
def encode_utf8(space, uni, allow_surrogates=False):
- # Note that this function never raises UnicodeEncodeError,
- # since surrogate pairs are allowed.
- # This is not the case with Python3.
+ # Note that Python3 tends to forbid lone surrogates
return runicode.unicode_encode_utf_8(
uni, len(uni), "strict",
errorhandler=encode_error_handler(space),
diff --git a/pypy/module/__builtin__/abstractinst.py b/pypy/module/__builtin__/abstractinst.py
--- a/pypy/module/__builtin__/abstractinst.py
+++ b/pypy/module/__builtin__/abstractinst.py
@@ -43,9 +43,61 @@
raise # propagate other errors
return space.type(w_obj)
+
+# ---------- isinstance ----------
+
+
+def p_recursive_isinstance_w(space, w_inst, w_cls):
+ # Copied straight from CPython 2.7. Does not handle 'cls' being a tuple.
+ if space.isinstance_w(w_cls, space.w_type):
+ return p_recursive_isinstance_type_w(space, w_inst, w_cls)
+
+ check_class(space, w_cls, "isinstance() arg 2 must be a class, type,"
+ " or tuple of classes and types")
+ try:
+ w_abstractclass = space.getattr(w_inst, space.wrap('__class__'))
+ except OperationError as e:
+ if not e.match(space, space.w_AttributeError):
+ raise # propagate other errors
+ return False
+ else:
+ return p_abstract_issubclass_w(space, w_abstractclass, w_cls)
+
+
+def p_recursive_isinstance_type_w(space, w_inst, w_type):
+ # subfunctionality of p_recursive_isinstance_w(): assumes that w_type is
+ # a type object. Copied straight from CPython 2.7.
+ if space.isinstance_w(w_inst, w_type):
+ return True
+ try:
+ w_abstractclass = space.getattr(w_inst, space.wrap('__class__'))
+ except OperationError as e:
+ if not e.match(space, space.w_AttributeError):
+ raise # propagate other errors
+ else:
+ if w_abstractclass is not space.type(w_inst):
+ if space.isinstance_w(w_abstractclass, space.w_type):
+ return space.issubtype_w(w_abstractclass, w_type)
+ return False
+
+
@jit.unroll_safe
def abstract_isinstance_w(space, w_obj, w_klass_or_tuple, allow_override=False):
"""Implementation for the full 'isinstance(obj, klass_or_tuple)'."""
+ # Copied from CPython 2.7's PyObject_Isinstance(). Additionally,
+ # if 'allow_override' is False (the default), then don't try to
+ # use a custom __instancecheck__ method.
+
+ # WARNING: backward compatibility function name here. CPython
+ # uses the name "abstract" to refer to the logic of handling
+ # class-like objects, with a "__bases__" attribute. This function
+ # here is not related to that and implements the full
+ # PyObject_IsInstance() logic.
+
+ # Quick test for an exact match
+ if space.type(w_obj) is w_klass_or_tuple:
+ return True
+
# -- case (anything, tuple)
# XXX it might be risky that the JIT sees this
if space.isinstance_w(w_klass_or_tuple, space.w_tuple):
@@ -55,64 +107,55 @@
return False
# -- case (anything, type)
- try:
- if allow_override:
- w_result = space.isinstance_allow_override(w_obj, w_klass_or_tuple)
- else:
- w_result = space.isinstance(w_obj, w_klass_or_tuple)
- except OperationError as e: # if w_klass_or_tuple was not a type, ignore it
- if not e.match(space, space.w_TypeError):
- raise # propagate other errors
- else:
- if space.is_true(w_result):
- return True
- # From now on we know that w_klass_or_tuple is indeed a type.
- # Try also to compare it with obj.__class__, if this is not
- # the same as type(obj).
- w_pretendtype = abstract_getclass(space, w_obj)
- try:
- if space.is_w(w_pretendtype, space.type(w_obj)):
- return False # common case: obj.__class__ is type(obj)
- if not allow_override:
- return space.issubtype_w(w_pretendtype, w_klass_or_tuple)
- w_result = space.issubtype_allow_override(w_pretendtype,
- w_klass_or_tuple)
- except OperationError as e:
- if e.async(space):
- raise
- return False # ignore most exceptions
- else:
- return space.is_true(w_result)
+ if allow_override:
+ w_check = space.lookup(w_klass_or_tuple, "__instancecheck__")
+ if w_check is not None:
+ # this is the common case: all type objects have a method
+ # __instancecheck__. The one in the base 'type' type calls
+ # back p_recursive_isinstance_type_w() from the present module.
+ return space.is_true(space.get_and_call_function(
+ w_check, w_klass_or_tuple, w_obj))
- return _abstract_isinstance_w_helper(space, w_obj, w_klass_or_tuple)
+ return p_recursive_isinstance_w(space, w_obj, w_klass_or_tuple)
-def _abstract_isinstance_w_helper(space, w_obj, w_klass_or_tuple):
- # -- case (anything, abstract-class)
- check_class(space, w_klass_or_tuple,
- "isinstance() arg 2 must be a class, type,"
- " or tuple of classes and types")
- try:
- w_abstractclass = space.getattr(w_obj, space.wrap('__class__'))
- except OperationError as e:
- if e.async(space): # ignore most exceptions
- raise
- return False
- else:
- return _issubclass_recurse(space, w_abstractclass, w_klass_or_tuple)
+# ---------- issubclass ----------
@jit.unroll_safe
-def _issubclass_recurse(space, w_derived, w_top):
- """Internal helper for abstract cases. Here, w_top cannot be a tuple."""
- if space.is_w(w_derived, w_top):
- return True
- w_bases = _get_bases(space, w_derived)
- if w_bases is not None:
- for w_base in space.fixedview(w_bases):
- if _issubclass_recurse(space, w_base, w_top):
+def p_abstract_issubclass_w(space, w_derived, w_cls):
+ # Copied straight from CPython 2.7, function abstract_issubclass().
+ # Don't confuse this with the function abstract_issubclass_w() below.
+ # Here, w_cls cannot be a tuple.
+ while True:
+ if space.is_w(w_derived, w_cls):
+ return True
+ w_bases = _get_bases(space, w_derived)
+ if w_bases is None:
+ return False
+ bases_w = space.fixedview(w_bases)
+ last_index = len(bases_w) - 1
+ if last_index < 0:
+ return False
+ # Avoid recursivity in the single inheritance case; in general,
+ # don't recurse on the last item in the tuple (loop instead).
+ for i in range(last_index):
+ if p_abstract_issubclass_w(space, bases_w[i], w_cls):
return True
- return False
+ w_derived = bases_w[last_index]
+
+
+def p_recursive_issubclass_w(space, w_derived, w_cls):
+ # From CPython's function of the same name (which as far as I can tell
+ # is not recursive). Copied straight from CPython 2.7.
+ if (space.isinstance_w(w_cls, space.w_type) and
+ space.isinstance_w(w_derived, space.w_type)):
+ return space.issubtype_w(w_derived, w_cls)
+ #
+ check_class(space, w_derived, "issubclass() arg 1 must be a class")
+ check_class(space, w_cls, "issubclass() arg 2 must be a class"
+ " or tuple of classes")
+ return p_abstract_issubclass_w(space, w_derived, w_cls)
@jit.unroll_safe
@@ -120,32 +163,46 @@
allow_override=False):
"""Implementation for the full 'issubclass(derived, klass_or_tuple)'."""
- # -- case (class-like-object, tuple-of-classes)
+ # WARNING: backward compatibility function name here. CPython
+ # uses the name "abstract" to refer to the logic of handling
+ # class-like objects, with a "__bases__" attribute. This function
+ # here is not related to that and implements the full
+ # PyObject_IsSubclass() logic. There is also p_abstract_issubclass_w().
+
+ # -- case (anything, tuple-of-classes)
if space.isinstance_w(w_klass_or_tuple, space.w_tuple):
for w_klass in space.fixedview(w_klass_or_tuple):
if abstract_issubclass_w(space, w_derived, w_klass, allow_override):
return True
return False
- # -- case (type, type)
- try:
- if not allow_override:
- return space.issubtype_w(w_derived, w_klass_or_tuple)
- w_result = space.issubtype_allow_override(w_derived, w_klass_or_tuple)
- except OperationError as e: # if one of the args was not a type, ignore it
- if not e.match(space, space.w_TypeError):
- raise # propagate other errors
- else:
- return space.is_true(w_result)
+ # -- case (anything, type)
+ if allow_override:
+ w_check = space.lookup(w_klass_or_tuple, "__subclasscheck__")
+ if w_check is not None:
+ # this is the common case: all type objects have a method
+ # __subclasscheck__. The one in the base 'type' type calls
+ # back p_recursive_issubclass_w() from the present module.
+ return space.is_true(space.get_and_call_function(
+ w_check, w_klass_or_tuple, w_derived))
- check_class(space, w_derived, "issubclass() arg 1 must be a class")
- # from here on, we are sure that w_derived is a class-like object
+ return p_recursive_issubclass_w(space, w_derived, w_klass_or_tuple)
- # -- case (class-like-object, abstract-class)
- check_class(space, w_klass_or_tuple,
- "issubclass() arg 2 must be a class, type,"
- " or tuple of classes and types")
- return _issubclass_recurse(space, w_derived, w_klass_or_tuple)
+
+# ------------------------------------------------------------
+# Exception helpers
+
+def exception_is_valid_obj_as_class_w(space, w_obj):
+ return BaseObjSpace.exception_is_valid_obj_as_class_w(space, w_obj)
+
+def exception_is_valid_class_w(space, w_cls):
+ return BaseObjSpace.exception_is_valid_class_w(space, w_cls)
+
+def exception_getclass(space, w_obj):
+ return BaseObjSpace.exception_getclass(space, w_obj)
+
+def exception_issubclass_w(space, w_cls1, w_cls2):
+ return BaseObjSpace.exception_issubclass_w(space, w_cls1, w_cls2)
# ____________________________________________________________
# App-level interface
diff --git a/pypy/module/__builtin__/test/test_abstractinst.py b/pypy/module/__builtin__/test/test_abstractinst.py
--- a/pypy/module/__builtin__/test/test_abstractinst.py
+++ b/pypy/module/__builtin__/test/test_abstractinst.py
@@ -226,3 +226,26 @@
c = C()
assert isinstance(c, C)
assert not called
+
+ def test_instancecheck_exception_not_eaten(self):
+ class M(object):
+ def __instancecheck__(self, obj):
+ raise TypeError("foobar")
+
+ e = raises(TypeError, isinstance, 42, M())
+ assert str(e.value) == "foobar"
+
+ def test_issubclass_exception_not_eaten(self):
+ class M(object):
+ def __subclasscheck__(self, subcls):
+ raise TypeError("foobar")
+
+ e = raises(TypeError, issubclass, 42, M())
+ assert str(e.value) == "foobar"
+
+ def test_issubclass_no_fallback(self):
+ class M(object):
+ def __subclasscheck__(self, subcls):
+ return False
+
+ assert issubclass(42, M()) is False
diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py
--- a/pypy/module/__pypy__/interp_builders.py
+++ b/pypy/module/__pypy__/interp_builders.py
@@ -40,7 +40,7 @@
s = self.builder.build()
self.builder = None
if strtype is str:
- return space.wrapbytes(s)
+ return space.newbytes(s)
else:
return space.wrap(s)
diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py
--- a/pypy/module/__pypy__/interp_intop.py
+++ b/pypy/module/__pypy__/interp_intop.py
@@ -2,21 +2,10 @@
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.rarithmetic import r_uint, intmask
+from rpython.rlib.rarithmetic import int_c_div, int_c_mod
from rpython.rlib import jit
-# XXX maybe temporary: hide llop.int_{floordiv,mod} from the JIT,
-# because now it expects only Python-style divisions, not the
-# C-style divisions of these two ll operations
- at jit.dont_look_inside
-def _int_floordiv(n, m):
- return llop.int_floordiv(lltype.Signed, n, m)
-
- at jit.dont_look_inside
-def _int_mod(n, m):
- return llop.int_mod(lltype.Signed, n, m)
-
-
@unwrap_spec(n=int, m=int)
def int_add(space, n, m):
return space.wrap(llop.int_add(lltype.Signed, n, m))
@@ -31,11 +20,11 @@
@unwrap_spec(n=int, m=int)
def int_floordiv(space, n, m):
- return space.wrap(_int_floordiv(n, m))
+ return space.wrap(int_c_div(n, m))
@unwrap_spec(n=int, m=int)
def int_mod(space, n, m):
- return space.wrap(_int_mod(n, m))
+ return space.wrap(int_c_mod(n, m))
@unwrap_spec(n=int, m=int)
def int_lshift(space, n, m):
diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py
--- a/pypy/module/_cffi_backend/cbuffer.py
+++ b/pypy/module/_cffi_backend/cbuffer.py
@@ -55,9 +55,9 @@
start, stop, step, size = space.decode_index4(w_index,
self.buffer.getlength())
if step == 0:
- return space.wrapbytes(self.buffer.getitem(start))
+ return space.newbytes(self.buffer.getitem(start))
res = self.buffer.getslice(start, stop, step, size)
- return space.wrapbytes(res)
+ return space.newbytes(res)
def descr_setitem(self, space, w_index, w_newstring):
start, stop, step, size = space.decode_index4(w_index,
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -220,6 +220,11 @@
if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK:
raise oefmt(space.w_SystemError,
"libffi failed to build this callback")
+ if closure_ptr.c_user_data != unique_id:
+ raise oefmt(space.w_SystemError,
+ "ffi_prep_closure(): bad user_data (it seems that the "
+ "version of the libffi library seen at runtime is "
+ "different from the 'ffi.h' file seen at compile-time)")
def py_invoke(self, ll_res, ll_args):
jitdriver1.jit_merge_point(callback=self,
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py
--- a/pypy/module/_cffi_backend/ctypeprim.py
+++ b/pypy/module/_cffi_backend/ctypeprim.py
@@ -84,7 +84,7 @@
if self.size == 1:
with cdataobj as ptr:
s = ptr[0]
- return self.space.wrapbytes(s)
+ return self.space.newbytes(s)
return W_CType.string(self, cdataobj, maxlen)
def unpack_ptr(self, w_ctypeptr, ptr, length):
@@ -126,7 +126,7 @@
return self.space.wrap(ord(cdata[0]))
def convert_to_object(self, cdata):
- return self.space.wrapbytes(cdata[0])
+ return self.space.newbytes(cdata[0])
def _convert_to_char(self, w_ob):
space = self.space
@@ -146,7 +146,7 @@
def unpack_ptr(self, w_ctypeptr, ptr, length):
s = rffi.charpsize2str(ptr, length)
- return self.space.wrapbytes(s)
+ return self.space.newbytes(s)
# XXX explicitly use an integer type instead of lltype.UniChar here,
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -122,7 +122,7 @@
s = rffi.charp2str(ptr)
else:
s = rffi.charp2strn(ptr, length)
- return space.wrapbytes(s)
+ return space.newbytes(s)
#
# pointer to a wchar_t: builds and returns a unicode
if self.is_unichar_ptr_or_array():
@@ -131,7 +131,7 @@
u = rffi.wcharp2unicode(cdata)
else:
u = rffi.wcharp2unicoden(cdata, length)
- return space.wrap(u)
+ return space.newunicode(u)
#
return W_CType.string(self, cdataobj, maxlen)
diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
--- a/pypy/module/_cffi_backend/ctypestruct.py
From pypy.commits at gmail.com Mon Aug 1 14:46:00 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Mon, 01 Aug 2016 11:46:00 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Remove wrong handle_with_stmt from
merge
Message-ID: <579f98e8.11051c0a.273b7.5457@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r85969:6f1bd36b0cda
Date: 2016-08-01 20:45 +0200
http://bitbucket.org/pypy/pypy/changeset/6f1bd36b0cda/
Log: Remove wrong handle_with_stmt from merge
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -415,13 +415,6 @@
return ast.Try(body, handlers, otherwise, finally_suite,
try_node.get_lineno(), try_node.get_column())
- def handle_with_stmt(self, with_node, is_async):
- if is_async:
- wi = ast.AsyncWith(test, target, body, with_node.get_lineno(),
- with_node.get_column())
- else:
- wi = ast.With(test, target, body, with_node.get_lineno(),
- with_node.get_column())
def handle_with_item(self, item_node):
test = self.handle_expr(item_node.get_child(0))
if item_node.num_children() == 3:
From pypy.commits at gmail.com Mon Aug 1 15:23:30 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 12:23:30 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: use brackets to lookup dict
Message-ID: <579fa1b2.d8011c0a.20ece.5623@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r85970:8d8e03f06ed0
Date: 2016-08-01 19:13 +0200
http://bitbucket.org/pypy/pypy/changeset/8d8e03f06ed0/
Log: use brackets to lookup dict
diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py
--- a/rpython/jit/backend/x86/vector_ext.py
+++ b/rpython/jit/backend/x86/vector_ext.py
@@ -317,7 +317,7 @@
# register, and we emit a load from the cc into this register.
if resloc is ebp:
- self.guard_success_cc = condition
+ self.assembler.guard_success_cc = condition
else:
assert lhsloc is xmm0
maskloc = X86_64_XMM_SCRATCH_REG
@@ -334,13 +334,13 @@
self.mc.CMPPS_xxi(lhsloc.value, rhsloc.value, 1 << 2)
else:
self.mc.CMPPD_xxi(lhsloc.value, rhsloc.value, 1 << 2)
- self.flush_vec_cc(rx86.Conditions("NE"), lhsloc, resloc, sizeloc.value)
+ self.flush_vec_cc(rx86.Conditions["NE"], lhsloc, resloc, sizeloc.value)
def genop_vec_int_eq(self, op, arglocs, resloc):
lhsloc, rhsloc, sizeloc = arglocs
size = sizeloc.value
self.mc.PCMPEQ(lhsloc, rhsloc, size)
- self.flush_vec_cc(rx86.Conditions("E"), lhsloc, resloc, sizeloc.value)
+ self.flush_vec_cc(rx86.Conditions["E"], lhsloc, resloc, sizeloc.value)
def genop_vec_int_ne(self, op, arglocs, resloc):
lhsloc, rhsloc, sizeloc = arglocs
@@ -354,7 +354,7 @@
# 11 11 11 11
# ----------- pxor
# 00 11 00 00
- self.flush_vec_cc(rx86.Conditions("NE"), lhsloc, resloc, sizeloc.value)
+ self.flush_vec_cc(rx86.Conditions["NE"], lhsloc, resloc, sizeloc.value)
def genop_vec_int_signext(self, op, arglocs, resloc):
srcloc, sizeloc, tosizeloc = arglocs
From pypy.commits at gmail.com Mon Aug 1 15:23:32 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 12:23:32 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: added three more tests to
ensure enforce_var_in_vector_reg works (found one issue)
Message-ID: <579fa1b4.56421c0a.23487.5dff@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r85971:a88c5ddefcb0
Date: 2016-08-01 21:22 +0200
http://bitbucket.org/pypy/pypy/changeset/a88c5ddefcb0/
Log: added three more tests to ensure enforce_var_in_vector_reg works
(found one issue)
diff --git a/rpython/jit/backend/x86/test/test_x86vector.py b/rpython/jit/backend/x86/test/test_x86vector.py
--- a/rpython/jit/backend/x86/test/test_x86vector.py
+++ b/rpython/jit/backend/x86/test/test_x86vector.py
@@ -1,10 +1,14 @@
import py
from rpython.jit.backend.x86.regloc import *
+from rpython.jit.backend.x86.regalloc import (RegAlloc,
+ X86FrameManager, X86XMMRegisterManager, X86RegisterManager)
+from rpython.jit.backend.x86.vector_ext import TempVector
from rpython.jit.backend.x86.test import test_basic
from rpython.jit.backend.x86.test.test_assembler import \
(TestRegallocPushPop as BaseTestAssembler)
from rpython.jit.metainterp.test import test_vector
from rpython.rtyper.lltypesystem import lltype
+from rpython.jit.backend.detect_cpu import getcpuclass
class TestBasic(test_basic.Jit386Mixin, test_vector.VectorizeTests):
# for the individual tests see
@@ -26,6 +30,30 @@
enable_opts = 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll'
+ at py.test.fixture
+def regalloc(request):
+ from rpython.jit.backend.x86.regalloc import X86FrameManager
+ from rpython.jit.backend.x86.regalloc import X86XMMRegisterManager
+ class FakeToken:
+ class compiled_loop_token:
+ asmmemmgr_blocks = None
+ cpu = getcpuclass()(None, None)
+ cpu.setup()
+ if cpu.HAS_CODEMAP:
+ cpu.codemap.setup()
+ looptoken = FakeToken()
+ asm = cpu.assembler
+ asm.setup_once()
+ asm.setup(looptoken)
+ regalloc = RegAlloc(asm)
+ regalloc.fm = fm = X86FrameManager(cpu.get_baseofs_of_frame_field())
+ regalloc.rm = X86RegisterManager({}, frame_manager = fm, assembler = asm)
+ regalloc.xrm = X86XMMRegisterManager({}, frame_manager = fm, assembler = asm)
+ request.cls.asm = asm
+ request.cls.regalloc = regalloc
+
+
+
class TestAssembler(BaseTestAssembler):
def imm_4_int32(self, a, b, c, d):
@@ -88,3 +116,48 @@
res = self.do_test(callback) & 0xffffffff
assert res == 22
+ def test_enforce_var(self, regalloc):
+ arg = TempVector('f')
+ args = []
+ self.regalloc.fm.bindings[arg] = FrameLoc(0, 64, 'f')
+ reg = self.regalloc.enforce_var_in_vector_reg(arg, args, xmm0)
+ assert reg is xmm0
+
+ def test_enforce_var_xmm0_forbidden(self, regalloc):
+ arg = TempVector('f')
+ arg1 = TempVector('f')
+ args = [arg1]
+ xrm = self.regalloc.xrm
+ xrm.reg_bindings[arg1] = xmm0
+ fr = xrm.free_regs
+ xrm.free_regs = [r for r in fr if r is not xmm0]
+ self.regalloc.fm.bindings[arg] = FrameLoc(0, 64, 'f')
+ reg = self.regalloc.enforce_var_in_vector_reg(arg, args, xmm0)
+ assert reg is xmm0
+ assert len(xrm.reg_bindings) == 2
+ assert xrm.reg_bindings[arg] == xmm0
+ assert xrm.reg_bindings[arg1] != xmm0
+
+ def test_enforce_var_spill(self, regalloc):
+ arg = TempVector('f')
+ arg1 = TempVector('f')
+ arg2 = TempVector('f')
+ args = []
+ xrm = self.regalloc.xrm
+ xrm.reg_bindings[arg1] = xmm0
+ xrm.reg_bindings[arg2] = xmm1
+ xrm.longevity[arg1] = (0,1)
+ xrm.longevity[arg2] = (0,2)
+ xrm.longevity[arg] = (0,3)
+ fr = xrm.free_regs
+ xrm.free_regs = []
+ self.regalloc.fm.bindings[arg] = FrameLoc(0, 64, 'f')
+ self.regalloc.fm.bindings[arg2] = FrameLoc(0, 72, 'f')
+ reg = self.regalloc.enforce_var_in_vector_reg(arg, args, xmm0)
+ assert reg is xmm0
+ assert len(xrm.reg_bindings) == 2
+ assert xrm.reg_bindings[arg] == xmm0
+ assert xrm.reg_bindings[arg1] == xmm1
+ assert arg2 not in xrm.reg_bindings
+
+
diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py
--- a/rpython/jit/backend/x86/vector_ext.py
+++ b/rpython/jit/backend/x86/vector_ext.py
@@ -10,7 +10,7 @@
xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14,
X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, AddressLoc)
from rpython.jit.backend.llsupport.vector_ext import VectorExt
-from rpython.jit.backend.llsupport.regalloc import get_scale, TempVar
+from rpython.jit.backend.llsupport.regalloc import get_scale, TempVar, NoVariableToSpill
from rpython.jit.metainterp.resoperation import (rop, ResOperation,
VectorOp, VectorGuardOp)
from rpython.rlib.objectmodel import we_are_translated, always_inline
@@ -317,7 +317,7 @@
# register, and we emit a load from the cc into this register.
if resloc is ebp:
- self.assembler.guard_success_cc = condition
+ self.guard_success_cc = rev_cond
else:
assert lhsloc is xmm0
maskloc = X86_64_XMM_SCRATCH_REG
@@ -654,7 +654,11 @@
# do we have a free register?
if len(xrm.free_regs) == 0:
# spill a non forbidden variable
- self._spill_var(candidate_to_spill, forbidden_vars, None)
+ if not candidate_to_spill:
+ raise NoVariableToSpill
+ reg = xrm.reg_bindings[candidate_to_spill]
+ xrm._spill_var(candidate_to_spill, forbidden_vars, None)
+ xrm.free_regs.append(reg)
loc = xrm.free_regs.pop()
self.assembler.mov(selected_reg, loc)
reg = xrm.reg_bindings.get(arg, None)
From pypy.commits at gmail.com Mon Aug 1 15:31:56 2016
From: pypy.commits at gmail.com (mattip)
Date: Mon, 01 Aug 2016 12:31:56 -0700 (PDT)
Subject: [pypy-commit] pypy default: whoops?
Message-ID: <579fa3ac.54bc1c0a.5a260.5f7f@mx.google.com>
Author: mattip
Branch:
Changeset: r85972:54055c320d57
Date: 2016-08-01 22:28 +0300
http://bitbucket.org/pypy/pypy/changeset/54055c320d57/
Log: whoops?
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -46,7 +46,7 @@
jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE)
def get_location(next_instr, is_being_profiled, bytecode):
from pypy.tool.stdlib_opcode import opcode_method_names
- from pypy.tool.error import offset2lineno
+ from rpython.tool.error import offset2lineno
bcindex = ord(bytecode.co_code[next_instr])
opname = ""
if 0 <= bcindex < len(opcode_method_names):
From pypy.commits at gmail.com Mon Aug 1 16:22:32 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Mon, 01 Aug 2016 13:22:32 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Add note for dirty fix
Message-ID: <579faf88.68adc20a.8208b.0427@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r85973:55d7fbdc81b4
Date: 2016-08-01 22:21 +0200
http://bitbucket.org/pypy/pypy/changeset/55d7fbdc81b4/
Log: Add note for dirty fix
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1443,7 +1443,7 @@
def GET_AWAITABLE(self, oparg, next_instr):
from pypy.objspace.std.noneobject import W_NoneObject
if isinstance(self.peekvalue(), W_NoneObject):
- #switch NoneObject with iterable on stack
+ #switch NoneObject with iterable on stack (kind of a dirty fix)
w_firstnone = self.popvalue()
w_i = self.popvalue()
self.pushvalue(w_firstnone)
From pypy.commits at gmail.com Mon Aug 1 17:26:57 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 01 Aug 2016 14:26:57 -0700 (PDT)
Subject: [pypy-commit] pypy default: Translation fix
Message-ID: <579fbea1.031dc20a.f2c35.2997@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r85974:d046815fc8d5
Date: 2016-08-01 22:35 +0100
http://bitbucket.org/pypy/pypy/changeset/d046815fc8d5/
Log: Translation fix
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -54,7 +54,7 @@
name = bytecode.co_name
if not name:
name = ""
- line = offset2lineno(bytecode, next_instr)
+ line = offset2lineno(bytecode, intmask(next_instr))
return (bytecode.co_filename, line,
name, intmask(next_instr), opname)
From pypy.commits at gmail.com Mon Aug 1 21:00:45 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 01 Aug 2016 18:00:45 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: fix test_intlike() and more_init case in
import_extension()
Message-ID: <579ff0bd.274fc20a.39fdf.53be@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r85975:1b2752361da5
Date: 2016-08-02 01:59 +0100
http://bitbucket.org/pypy/pypy/changeset/1b2752361da5/
Log: fix test_intlike() and more_init case in import_extension()
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -417,8 +417,7 @@
init = """PyObject *mod = PyModule_Create(&moduledef);"""
if more_init:
init += more_init
- else:
- init += "\nreturn mod;"
+ init += "\nreturn mod;"
return import_module(space, name=modname, init=init, body=body,
w_include_dirs=w_include_dirs,
PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN)
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -779,7 +779,7 @@
""", more_init="""
IntLike_Type.tp_flags |= Py_TPFLAGS_DEFAULT;
IntLike_Type.tp_as_number = &intlike_as_number;
- intlike_as_number.nb_bool = intlike_nb_nonzero;
+ intlike_as_number.nb_bool = intlike_nb_bool;
intlike_as_number.nb_int = intlike_nb_int;
PyType_Ready(&IntLike_Type);
""")
From pypy.commits at gmail.com Mon Aug 1 21:18:43 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 01 Aug 2016 18:18:43 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Fix bad merge in test_number.py
Message-ID: <579ff4f3.031dc20a.af26a.0223@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r85976:12a7377be119
Date: 2016-08-02 02:18 +0100
http://bitbucket.org/pypy/pypy/changeset/12a7377be119/
Log: Fix bad merge in test_number.py
diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py
--- a/pypy/module/cpyext/test/test_number.py
+++ b/pypy/module/cpyext/test/test_number.py
@@ -1,5 +1,6 @@
from rpython.rtyper.lltypesystem import lltype
from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
class TestIterator(BaseApiTest):
def test_check(self, space, api):
@@ -63,7 +64,9 @@
assert 9 == space.unwrap(
api.PyNumber_InPlacePower(space.wrap(3), space.wrap(2), space.w_None))
- def test_PyNumber_Check(self):
+
+class AppTestCNumber(AppTestCpythonExtensionBase):
+ def test_PyNumber_Check(self):
mod = self.import_extension('foo', [
("test_PyNumber_Check", "METH_VARARGS",
'''
From pypy.commits at gmail.com Mon Aug 1 21:54:16 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 01 Aug 2016 18:54:16 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Port test_import_module.c to 3
Message-ID: <579ffd48.c2f3c20a.a80d5.0919@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r85977:7b64c8a8597e
Date: 2016-08-02 02:53 +0100
http://bitbucket.org/pypy/pypy/changeset/7b64c8a8597e/
Log: Port test_import_module.c to 3
diff --git a/pypy/module/cpyext/test/test_import_module.c b/pypy/module/cpyext/test/test_import_module.c
--- a/pypy/module/cpyext/test/test_import_module.c
+++ b/pypy/module/cpyext/test/test_import_module.c
@@ -1,17 +1,20 @@
#include "Python.h"
/* Initialize this module. */
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "test_import_module",
+ NULL,
+ -1,
+ NULL, NULL, NULL, NULL, NULL
+};
+
PyMODINIT_FUNC
-inittest_import_module(void)
+PyInit_test_import_module(void)
{
- PyObject *m, *d;
-
- m = Py_InitModule("test_import_module", NULL);
- if (m == NULL)
- return;
- d = PyModule_GetDict(m);
- if (d) {
- PyDict_SetItemString(d, "TEST", (PyObject *) Py_None);
- }
- /* No need to check the error here, the caller will do that */
+ PyObject* m = PyModule_Create(&moduledef);
+ if (m == NULL)
+ return NULL;
+ PyModule_AddObject(m, "TEST", (PyObject *) Py_None);
+ return m;
}
From pypy.commits at gmail.com Mon Aug 1 23:53:32 2016
From: pypy.commits at gmail.com (mattip)
Date: Mon, 01 Aug 2016 20:53:32 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-realloc: test, fix for translated realloc
Message-ID: <57a0193c.ca11c30a.76936.22d7@mx.google.com>
Author: mattip
Branch: cpyext-realloc
Changeset: r85978:12bb1db94194
Date: 2016-08-02 06:37 +0300
http://bitbucket.org/pypy/pypy/changeset/12bb1db94194/
Log: test, fix for translated realloc
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -21,6 +21,8 @@
flavor='raw',
add_memory_pressure=True)
+realloc = rffi.llexternal('realloc', [rffi.VOIDP, rffi.SIZE_T], rffi.VOIDP)
+
@cpython_api([rffi.VOIDP, size_t], rffi.VOIDP)
def PyObject_Realloc(space, ptr, size):
if not lltype.cast_ptr_to_int(ptr):
@@ -28,7 +30,7 @@
flavor='raw',
add_memory_pressure=True)
# XXX FIXME
- return lltype.nullptr(rffi.VOIDP.TO)
+ return realloc(ptr, size)
@cpython_api([rffi.VOIDP], lltype.Void)
def PyObject_Free(space, ptr):
diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py
--- a/pypy/module/cpyext/test/test_object.py
+++ b/pypy/module/cpyext/test/test_object.py
@@ -235,7 +235,6 @@
assert type(x) is int
assert x == -424344
- @pytest.mark.skipif(True, reason='realloc not fully implemented')
def test_object_realloc(self):
module = self.import_extension('foo', [
("realloctest", "METH_NOARGS",
From pypy.commits at gmail.com Tue Aug 2 02:08:25 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 01 Aug 2016 23:08:25 -0700 (PDT)
Subject: [pypy-commit] pypy default: use intmask to not overflow rffi.cast,
tests pass now again (s390x)
Message-ID: <57a038d9.17a61c0a.d418c.5eda@mx.google.com>
Author: Richard Plangger
Branch:
Changeset: r85979:dffc8113a10f
Date: 2016-08-02 08:06 +0200
http://bitbucket.org/pypy/pypy/changeset/dffc8113a10f/
Log: use intmask to not overflow rffi.cast, tests pass now again (s390x)
diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py
--- a/rpython/jit/backend/zarch/test/test_assembler.py
+++ b/rpython/jit/backend/zarch/test/test_assembler.py
@@ -21,6 +21,7 @@
from rpython.rlib.debug import ll_assert
from rpython.rlib.longlong2float import (float2longlong,
DOUBLE_ARRAY_PTR, singlefloat2uint_emulator)
+from rpython.rlib.rarithmetic import r_uint, intmask
import ctypes
CPU = getcpuclass()
@@ -168,7 +169,7 @@
def test_load_byte_zero_extend(self):
adr = self.a.datablockwrapper.malloc_aligned(16, 16)
data = rffi.cast(rffi.CArrayPtr(rffi.ULONG), adr)
- data[0] = rffi.cast(rffi.ULONG,0xffffFFFFffffFF02)
+ data[0] = rffi.cast(rffi.ULONG, intmask(0xffffFFFFffffFF02))
self.a.mc.load_imm(r.r3, adr+7)
self.a.mc.LLGC(r.r2, loc.addr(0,r.r3))
self.a.mc.BCR(con.ANY, r.r14)
@@ -177,7 +178,7 @@
def test_load_byte_and_imm(self):
adr = self.a.datablockwrapper.malloc_aligned(16, 16)
data = rffi.cast(rffi.CArrayPtr(rffi.ULONG), adr)
- data[0] = rffi.cast(rffi.ULONG,0xffffFFFFffff0001)
+ data[0] = rffi.cast(rffi.ULONG, intmask(0xffffFFFFffff0001))
self.a.mc.load_imm(r.r3, adr)
self.a.mc.LG(r.r2, loc.addr(0,r.r3))
self.a.mc.LLGC(r.r2, loc.addr(7,r.r3))
From pypy.commits at gmail.com Tue Aug 2 04:41:25 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 01:41:25 -0700 (PDT)
Subject: [pypy-commit] pypy default: update comment
Message-ID: <57a05cb5.eeb8c20a.6ebe3.7017@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r85980:537ba35b03ea
Date: 2016-08-01 16:00 +0200
http://bitbucket.org/pypy/pypy/changeset/537ba35b03ea/
Log: update comment
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-# hypothesis is used for test generation on untranslated jit tests
+# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
From pypy.commits at gmail.com Tue Aug 2 04:41:27 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 01:41:27 -0700 (PDT)
Subject: [pypy-commit] pypy default: Issue #2360: Test and probable fix for
_rawffi.alt on Windows
Message-ID: <57a05cb7.469d1c0a.c5985.96d4@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r85981:e87949c507ce
Date: 2016-08-02 10:43 +0200
http://bitbucket.org/pypy/pypy/changeset/e87949c507ce/
Log: Issue #2360: Test and probable fix for _rawffi.alt on Windows
diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py
--- a/pypy/module/_rawffi/alt/interp_funcptr.py
+++ b/pypy/module/_rawffi/alt/interp_funcptr.py
@@ -20,7 +20,8 @@
def _getfunc(space, CDLL, w_name, w_argtypes, w_restype):
argtypes_w, argtypes, w_restype, restype = unpack_argtypes(
space, w_argtypes, w_restype)
- if space.isinstance_w(w_name, space.w_str):
+ if (space.isinstance_w(w_name, space.w_str) or
+ space.isinstance_w(w_name, space.w_unicode)):
name = space.str_w(w_name)
try:
func = CDLL.cdll.getpointer(name, argtypes, restype,
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
@@ -133,6 +133,12 @@
# You cannot assign character format codes as restype any longer
raises(TypeError, setattr, f, "restype", "i")
+ def test_unicode_function_name(self):
+ f = dll[u'_testfunc_i_bhilfd']
+ f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
+ f.restype = c_int
+ result = f(1, 2, 3, 4, 5.0, 6.0)
+ assert result == 21
def test_truncate_python_longs(self):
f = dll._testfunc_i_bhilfd
From pypy.commits at gmail.com Tue Aug 2 04:41:29 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 01:41:29 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge heads
Message-ID: <57a05cb9.09afc20a.998b.7268@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r85982:518912cfcdd9
Date: 2016-08-02 10:43 +0200
http://bitbucket.org/pypy/pypy/changeset/518912cfcdd9/
Log: merge heads
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-# hypothesis is used for test generation on untranslated jit tests
+# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
From pypy.commits at gmail.com Tue Aug 2 05:23:01 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 02:23:01 -0700 (PDT)
Subject: [pypy-commit] pypy default: Issue #2348: FreeBSD needs this value
of _POSIX_C_SOURCE to expose
Message-ID: <57a06675.c15e1c0a.4787.4961@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r85983:3a63f18f76cb
Date: 2016-08-02 11:24 +0200
http://bitbucket.org/pypy/pypy/changeset/3a63f18f76cb/
Log: Issue #2348: FreeBSD needs this value of _POSIX_C_SOURCE to expose
functions like unlinkat(), used on py3k.
diff --git a/rpython/translator/c/src/precommondefs.h b/rpython/translator/c/src/precommondefs.h
--- a/rpython/translator/c/src/precommondefs.h
+++ b/rpython/translator/c/src/precommondefs.h
@@ -18,9 +18,9 @@
#define _LARGEFILE_SOURCE 1
/* Define on NetBSD to activate all library features */
#define _NETBSD_SOURCE 1
-/* Define to activate features from IEEE Stds 1003.1-2001 */
+/* Define to activate features from IEEE Stds 1003.1-2008 */
#ifndef _POSIX_C_SOURCE
-# define _POSIX_C_SOURCE 200112L
+# define _POSIX_C_SOURCE 200809L
#endif
/* Define on FreeBSD to activate all library features */
#define __BSD_VISIBLE 1
From pypy.commits at gmail.com Tue Aug 2 07:28:19 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Tue, 02 Aug 2016 04:28:19 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Fix import error
(PyPyClassCollector does not seem to exist anymore,
pypydir was imported from the wrong file in apptest)
Message-ID: <57a083d3.6814c30a.71dcd.b435@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r85984:cf27d3b81de8
Date: 2016-08-02 13:27 +0200
http://bitbucket.org/pypy/pypy/changeset/cf27d3b81de8/
Log: Fix import error (PyPyClassCollector does not seem to exist anymore,
pypydir was imported from the wrong file in apptest)
diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py
--- a/pypy/tool/pytest/apptest.py
+++ b/pypy/tool/pytest/apptest.py
@@ -16,7 +16,7 @@
from pypy.tool.pytest import appsupport
from pypy.tool.pytest.objspace import gettestobjspace
from rpython.tool.udir import udir
-from pypy.conftest import PyPyClassCollector, pypydir
+from pypy import pypydir
from inspect import getmro
pypyroot = os.path.dirname(pypydir)
From pypy.commits at gmail.com Tue Aug 2 09:07:57 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 06:07:57 -0700 (PDT)
Subject: [pypy-commit] pypy default: Manual copy from the
null_byte_after_str branch: fix a memory leak in cpyext
Message-ID: <57a09b2d.47cbc20a.13c7.d6dc@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r85985:dafc333097c5
Date: 2016-08-02 15:09 +0200
http://bitbucket.org/pypy/pypy/changeset/dafc333097c5/
Log: Manual copy from the null_byte_after_str branch: fix a memory leak
in cpyext
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -96,7 +96,8 @@
raise oefmt(space.w_ValueError,
"bytes_attach called on object with ob_size %d but trying to store %d",
py_str.c_ob_size, len(s))
- rffi.c_memcpy(py_str.c_ob_sval, rffi.str2charp(s), len(s))
+ with rffi.scoped_nonmovingbuffer(s) as s_ptr:
+ rffi.c_memcpy(py_str.c_ob_sval, s_ptr, len(s))
py_str.c_ob_sval[len(s)] = '\0'
py_str.c_ob_shash = space.hash_w(w_obj)
py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
From pypy.commits at gmail.com Tue Aug 2 09:26:05 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 06:26:05 -0700 (PDT)
Subject: [pypy-commit] pypy null_byte_after_str: Revert a few places where
it doesn't really help. This is also an
Message-ID: <57a09f6d.c2a5c20a.adc42.ddb5@mx.google.com>
Author: Armin Rigo
Branch: null_byte_after_str
Changeset: r85986:f4cf210c8a79
Date: 2016-08-02 15:16 +0200
http://bitbucket.org/pypy/pypy/changeset/f4cf210c8a79/
Log: Revert a few places where it doesn't really help. This is also an
attempt to avoid obscure bugs in code that is not tested a lot.
diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py
--- a/pypy/module/_locale/interp_locale.py
+++ b/pypy/module/_locale/interp_locale.py
@@ -126,9 +126,13 @@
space.isinstance_w(w_s2, space.w_str)):
s1, s2 = space.str_w(w_s1), space.str_w(w_s2)
- with rffi.scoped_view_charp(s1) as s1_c:
- with rffi.scoped_view_charp(s2) as s2_c:
- return space.wrap(_strcoll(s1_c, s2_c))
+ s1_c = rffi.str2charp(s1)
+ s2_c = rffi.str2charp(s2)
+ try:
+ return space.wrap(_strcoll(s1_c, s2_c))
+ finally:
+ rffi.free_charp(s1_c)
+ rffi.free_charp(s2_c)
s1, s2 = space.unicode_w(w_s1), space.unicode_w(w_s2)
@@ -151,15 +155,21 @@
n1 = len(s) + 1
buf = lltype.malloc(rffi.CCHARP.TO, n1, flavor="raw", zero=True)
- with rffi.scoped_view_charp(s) as s_c:
+ s_c = rffi.str2charp(s)
+ try:
n2 = _strxfrm(buf, s_c, n1) + 1
+ finally:
+ rffi.free_charp(s_c)
if n2 > n1:
# more space needed
lltype.free(buf, flavor="raw")
buf = lltype.malloc(rffi.CCHARP.TO, intmask(n2),
flavor="raw", zero=True)
- with rffi.scoped_view_charp(s) as s_c:
+ s_c = rffi.str2charp(s)
+ try:
_strxfrm(buf, s_c, n2)
+ finally:
+ rffi.free_charp(s_c)
val = rffi.charp2str(buf)
lltype.free(buf, flavor="raw")
@@ -188,8 +198,11 @@
def gettext(space, msg):
"""gettext(msg) -> string
Return translation of msg."""
- with rffi.scoped_view_charp(msg) as msg_c:
+ msg_c = rffi.str2charp(msg)
+ try:
return space.wrap(rffi.charp2str(_gettext(msg_c)))
+ finally:
+ rffi.free_charp(msg_c)
_dgettext = rlocale.external('dgettext', [rffi.CCHARP, rffi.CCHARP], rffi.CCHARP)
@@ -199,21 +212,28 @@
Return translation of msg in domain."""
if space.is_w(w_domain, space.w_None):
domain = None
- with rffi.scoped_view_charp(msg) as msg_c:
+ msg_c = rffi.str2charp(msg)
+ try:
result = _dgettext(domain, msg_c)
# note that 'result' may be the same pointer as 'msg_c',
# so it must be converted to an RPython string *before*
# we free msg_c.
result = rffi.charp2str(result)
+ finally:
+ rffi.free_charp(msg_c)
else:
domain = space.str_w(w_domain)
- with rffi.scoped_view_charp(domain) as domain_c:
- with rffi.scoped_view_charp(msg) as msg_c:
- result = _dgettext(domain_c, msg_c)
- # note that 'result' may be the same pointer as 'msg_c',
- # so it must be converted to an RPython string *before*
- # we free msg_c.
- result = rffi.charp2str(result)
+ domain_c = rffi.str2charp(domain)
+ msg_c = rffi.str2charp(msg)
+ try:
+ result = _dgettext(domain_c, msg_c)
+ # note that 'result' may be the same pointer as 'msg_c',
+ # so it must be converted to an RPython string *before*
+ # we free msg_c.
+ result = rffi.charp2str(result)
+ finally:
+ rffi.free_charp(domain_c)
+ rffi.free_charp(msg_c)
return space.wrap(result)
@@ -227,22 +247,29 @@
if space.is_w(w_domain, space.w_None):
domain = None
- with rffi.scoped_view_charp(msg) as msg_c:
+ msg_c = rffi.str2charp(msg)
+ try:
result = _dcgettext(domain, msg_c, rffi.cast(rffi.INT, category))
# note that 'result' may be the same pointer as 'msg_c',
# so it must be converted to an RPython string *before*
# we free msg_c.
result = rffi.charp2str(result)
+ finally:
+ rffi.free_charp(msg_c)
else:
domain = space.str_w(w_domain)
- with rffi.scoped_view_charp(domain) as domain_c:
- with rffi.scoped_view_charp(msg) as msg_c:
- result = _dcgettext(domain_c, msg_c,
- rffi.cast(rffi.INT, category))
- # note that 'result' may be the same pointer as 'msg_c',
- # so it must be converted to an RPython string *before*
- # we free msg_c.
- result = rffi.charp2str(result)
+ domain_c = rffi.str2charp(domain)
+ msg_c = rffi.str2charp(msg)
+ try:
+ result = _dcgettext(domain_c, msg_c,
+ rffi.cast(rffi.INT, category))
+ # note that 'result' may be the same pointer as 'msg_c',
+ # so it must be converted to an RPython string *before*
+ # we free msg_c.
+ result = rffi.charp2str(result)
+ finally:
+ rffi.free_charp(domain_c)
+ rffi.free_charp(msg_c)
return space.wrap(result)
@@ -259,12 +286,15 @@
result = rffi.charp2str(result)
else:
domain = space.str_w(w_domain)
- with rffi.scoped_view_charp(domain) as domain_c:
+ domain_c = rffi.str2charp(domain)
+ try:
result = _textdomain(domain_c)
# note that 'result' may be the same pointer as 'domain_c'
# (maybe?) so it must be converted to an RPython string
# *before* we free domain_c.
result = rffi.charp2str(result)
+ finally:
+ rffi.free_charp(domain_c)
return space.wrap(result)
@@ -279,13 +309,20 @@
if space.is_w(w_dir, space.w_None):
dir = None
- with rffi.scoped_view_charp(domain) as domain_c:
+ domain_c = rffi.str2charp(domain)
+ try:
dirname = _bindtextdomain(domain_c, dir)
+ finally:
+ rffi.free_charp(domain_c)
else:
dir = space.str_w(w_dir)
- with rffi.scoped_view_charp(domain) as domain_c:
- with rffi.scoped_view_charp(dir) as dir_c:
- dirname = _bindtextdomain(domain_c, dir_c)
+ domain_c = rffi.str2charp(domain)
+ dir_c = rffi.str2charp(dir)
+ try:
+ dirname = _bindtextdomain(domain_c, dir_c)
+ finally:
+ rffi.free_charp(domain_c)
+ rffi.free_charp(dir_c)
if not dirname:
errno = rposix.get_saved_errno()
@@ -303,13 +340,20 @@
if space.is_w(w_codeset, space.w_None):
codeset = None
- with rffi.scoped_view_charp(domain) as domain_c:
+ domain_c = rffi.str2charp(domain)
+ try:
result = _bind_textdomain_codeset(domain_c, codeset)
+ finally:
+ rffi.free_charp(domain_c)
else:
codeset = space.str_w(w_codeset)
- with rffi.scoped_view_charp(domain) as domain_c:
- with rffi.scoped_view_charp(codeset) as codeset_c:
- result = _bind_textdomain_codeset(domain_c, codeset_c)
+ domain_c = rffi.str2charp(domain)
+ codeset_c = rffi.str2charp(codeset)
+ try:
+ result = _bind_textdomain_codeset(domain_c, codeset_c)
+ finally:
+ rffi.free_charp(domain_c)
+ rffi.free_charp(codeset_c)
if not result:
return space.w_None
diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py
--- a/pypy/module/_pypyjson/interp_decoder.py
+++ b/pypy/module/_pypyjson/interp_decoder.py
@@ -52,13 +52,13 @@
# 1) we automatically get the '\0' sentinel at the end of the string,
# which means that we never have to check for the "end of string"
# 2) we can pass the buffer directly to strtod
- self.ll_chars, self.buf_flag = rffi.get_nonmovingbuffer_final_null(s)
+ self.ll_chars = rffi.str2charp(s)
self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
self.pos = 0
self.last_type = TYPE_UNKNOWN
def close(self):
- rffi.free_nonmovingbuffer(self.s, self.ll_chars, self.buf_flag)
+ rffi.free_charp(self.ll_chars)
lltype.free(self.end_ptr, flavor='raw')
def getslice(self, start, end):
diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py
--- a/pypy/module/_winreg/interp_winreg.py
+++ b/pypy/module/_winreg/interp_winreg.py
@@ -218,7 +218,7 @@
subkey = None
else:
subkey = space.str_w(w_subkey)
- with rffi.scoped_view_charp(value) as dataptr:
+ with rffi.scoped_str2charp(value) as dataptr:
ret = rwinreg.RegSetValue(hkey, subkey, rwinreg.REG_SZ, dataptr, len(value))
if ret != 0:
raiseWindowsError(space, ret, 'RegSetValue')
diff --git a/rpython/rlib/_os_support.py b/rpython/rlib/_os_support.py
--- a/rpython/rlib/_os_support.py
+++ b/rpython/rlib/_os_support.py
@@ -20,7 +20,6 @@
charp2str = staticmethod(rffi.charp2str)
charpsize2str = staticmethod(rffi.charpsize2str)
scoped_str2charp = staticmethod(rffi.scoped_str2charp)
- scoped_view_charp = staticmethod(rffi.scoped_view_charp)
str2charp = staticmethod(rffi.str2charp)
free_charp = staticmethod(rffi.free_charp)
scoped_alloc_buffer = staticmethod(rffi.scoped_alloc_buffer)
@@ -56,8 +55,6 @@
charpsize2str = staticmethod(rffi.wcharpsize2unicode)
str2charp = staticmethod(rffi.unicode2wcharp)
scoped_str2charp = staticmethod(rffi.scoped_unicode2wcharp)
- scoped_view_charp = staticmethod(rffi.scoped_unicode2wcharp)
- # ^^^ XXX there is no unicode variant of rffi.scoped_view_charp
free_charp = staticmethod(rffi.free_wcharp)
scoped_alloc_buffer = staticmethod(rffi.scoped_alloc_unicodebuffer)
diff --git a/rpython/rlib/rposix_environ.py b/rpython/rlib/rposix_environ.py
--- a/rpython/rlib/rposix_environ.py
+++ b/rpython/rlib/rposix_environ.py
@@ -163,7 +163,7 @@
return result
def getenv_llimpl(name):
- with traits.scoped_view_charp(name) as l_name:
+ with traits.scoped_str2charp(name) as l_name:
l_result = getenv(l_name)
return traits.charp2str(l_result) if l_result else None
@@ -206,7 +206,7 @@
save_err=rffi.RFFI_SAVE_ERRNO)
def unsetenv_llimpl(name):
- with rffi.scoped_view_charp(name) as l_name:
+ with rffi.scoped_str2charp(name) as l_name:
error = rffi.cast(lltype.Signed, os_unsetenv(l_name))
if error:
from rpython.rlib import rposix
diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py
--- a/rpython/rlib/rsocket.py
+++ b/rpython/rlib/rsocket.py
@@ -963,7 +963,7 @@
self.settimeout(timeout)
def setsockopt(self, level, option, value):
- with rffi.scoped_view_charp(value) as buf:
+ with rffi.scoped_str2charp(value) as buf:
res = _c.socketsetsockopt(self.fd, level, option,
rffi.cast(rffi.VOIDP, buf),
len(value))
From pypy.commits at gmail.com Tue Aug 2 09:26:07 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 06:26:07 -0700 (PDT)
Subject: [pypy-commit] pypy null_byte_after_str: Cancel a fragile dependency
with details of space.str_w(), and instead
Message-ID: <57a09f6f.87941c0a.985f8.01eb@mx.google.com>
Author: Armin Rigo
Branch: null_byte_after_str
Changeset: r85987:2977ce236abb
Date: 2016-08-02 15:27 +0200
http://bitbucket.org/pypy/pypy/changeset/2977ce236abb/
Log: Cancel a fragile dependency with details of space.str_w(), and
instead do it all inside W_CTypeFunc._call
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -157,11 +157,13 @@
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
try:
+ keepalives = [None] * len(args_w) # None or strings
for i in range(len(args_w)):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
w_obj = args_w[i]
argtype = self.fargs[i]
- if argtype.convert_argument_from_object(data, w_obj):
+ if argtype.convert_argument_from_object(data, w_obj,
+ keepalives, i):
# argtype is a pointer type, and w_obj a list/tuple/str
mustfree_max_plus_1 = i + 1
@@ -181,7 +183,8 @@
if flag == 1:
lltype.free(raw_cdata, flavor='raw')
elif flag >= 4:
- value = args_w[i].str_w(space)
+ value = keepalives[i]
+ assert value is not None
rffi.free_nonmovingbuffer(value, raw_cdata, chr(flag))
lltype.free(buffer, flavor='raw')
keepalive_until_here(args_w)
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -83,7 +83,7 @@
raise oefmt(space.w_TypeError, "cannot initialize cdata '%s'",
self.name)
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
self.convert_from_object(cdata, w_ob)
return False
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -261,13 +261,12 @@
else:
return lltype.nullptr(rffi.CCHARP.TO)
- def _prepare_pointer_call_argument(self, w_init, cdata):
+ def _prepare_pointer_call_argument(self, w_init, cdata, keepalives, i):
space = self.space
if self.accept_str and space.isinstance_w(w_init, space.w_str):
# special case to optimize strings passed to a "char *" argument
- # WARNING: this relies on the fact that w_init.str_w() returns
- # always the same object for the same w_init!
value = w_init.str_w(space)
+ keepalives[i] = value
buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
rffi.cast(rffi.CCHARPP, cdata)[0] = buf
return ord(buf_flag) # 4, 5 or 6
@@ -307,10 +306,11 @@
rffi.cast(rffi.CCHARPP, cdata)[0] = result
return 1
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag
result = (not isinstance(w_ob, cdataobj.W_CData) and
- self._prepare_pointer_call_argument(w_ob, cdata))
+ self._prepare_pointer_call_argument(w_ob, cdata,
+ keepalives, i))
if result == 0:
self.convert_from_object(cdata, w_ob)
set_mustfree_flag(cdata, result)
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -451,9 +451,6 @@
return self._value
def str_w(self, space):
- # WARNING: _cffi_backend/ctypeptr.py depends on the fact that
- # w_obj.str_w() called twice on the same object returns the
- # exact same string object!
return self._value
def buffer_w(self, space, flags):
From pypy.commits at gmail.com Tue Aug 2 09:48:52 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 02 Aug 2016 06:48:52 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: fix test_obj.py
Message-ID: <57a0a4c4.c3f0c20a.4a412.e8d4@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r85988:381e9e66fcc3
Date: 2016-08-02 14:48 +0100
http://bitbucket.org/pypy/pypy/changeset/381e9e66fcc3/
Log: fix test_obj.py
diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py
--- a/pypy/objspace/std/test/test_obj.py
+++ b/pypy/objspace/std/test/test_obj.py
@@ -185,12 +185,12 @@
skip("cannot run this test as apptest")
for u in [u"", u"a", u"aa"]:
assert id(self.unwrap_wrap_unicode(u)) == id(u)
- s = str(u)
- assert id(self.unwrap_wrap_str(s)) == id(s)
+ s = u.encode()
+ assert id(self.unwrap_wrap_bytes(s)) == id(s)
#
- assert id('') == (256 << 4) | 11 # always
+ assert id(b'') == (256 << 4) | 11 # always
assert id(u'') == (257 << 4) | 11
- assert id('a') == (ord('a') << 4) | 11
+ assert id(b'a') == (ord('a') << 4) | 11
assert id(u'\u1234') == ((~0x1234) << 4) | 11
def test_id_of_tuples(self):
@@ -243,13 +243,13 @@
l = []
def add(s, u):
l.append(s)
- l.append(self.unwrap_wrap_str(s))
+ l.append(self.unwrap_wrap_bytes(s))
l.append(s[:1] + s[1:])
l.append(u)
l.append(self.unwrap_wrap_unicode(u))
l.append(u[:1] + u[1:])
for i in range(3, 18):
- add(str(i), unicode(i))
+ add(str(i).encode(), str(i))
add(b"s", u"s")
add(b"", u"")
From pypy.commits at gmail.com Tue Aug 2 09:55:54 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 02 Aug 2016 06:55:54 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: The empty string hash is now -2
Message-ID: <57a0a66a.497bc20a.273c8.eece@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r85989:a215c6257e88
Date: 2016-08-02 14:55 +0100
http://bitbucket.org/pypy/pypy/changeset/a215c6257e88/
Log: The empty string hash is now -2
diff --git a/lib-python/3/test/test_hash.py b/lib-python/3/test/test_hash.py
--- a/lib-python/3/test/test_hash.py
+++ b/lib-python/3/test/test_hash.py
@@ -164,7 +164,7 @@
class StringlikeHashRandomizationTests(HashRandomizationTests):
if check_impl_detail(pypy=True):
- EMPTY_STRING_HASH = -1
+ EMPTY_STRING_HASH = -2
else:
EMPTY_STRING_HASH = 0
From pypy.commits at gmail.com Tue Aug 2 12:11:35 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 09:11:35 -0700 (PDT)
Subject: [pypy-commit] pypy null_byte_after_str: close branch, ready to merge
Message-ID: <57a0c637.cb7f1c0a.62e3c.d401@mx.google.com>
Author: Armin Rigo
Branch: null_byte_after_str
Changeset: r85990:69db7529f95b
Date: 2016-08-02 18:10 +0200
http://bitbucket.org/pypy/pypy/changeset/69db7529f95b/
Log: close branch, ready to merge
From pypy.commits at gmail.com Tue Aug 2 12:11:38 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 09:11:38 -0700 (PDT)
Subject: [pypy-commit] pypy default: hg merge null_byte_after_str
Message-ID: <57a0c63a.cb7f1c0a.62e3c.d406@mx.google.com>
Author: Armin Rigo
Branch: default
Changeset: r85991:a84c4b359dcc
Date: 2016-08-02 18:13 +0200
http://bitbucket.org/pypy/pypy/changeset/a84c4b359dcc/
Log: hg merge null_byte_after_str
Allocate all RPython strings with one extra byte, normally unused.
It is used to hold a final zero in case we need some 'char *'
representation of the string, together with checks like 'not
can_move()' or object pinning. Main new thing that this allows:
'ffi.from_buffer(string)'.
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -157,11 +157,13 @@
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
try:
+ keepalives = [None] * len(args_w) # None or strings
for i in range(len(args_w)):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
w_obj = args_w[i]
argtype = self.fargs[i]
- if argtype.convert_argument_from_object(data, w_obj):
+ if argtype.convert_argument_from_object(data, w_obj,
+ keepalives, i):
# argtype is a pointer type, and w_obj a list/tuple/str
mustfree_max_plus_1 = i + 1
@@ -177,9 +179,13 @@
if isinstance(argtype, W_CTypePointer):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
flag = get_mustfree_flag(data)
+ raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
if flag == 1:
- raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
lltype.free(raw_cdata, flavor='raw')
+ elif flag >= 4:
+ value = keepalives[i]
+ assert value is not None
+ rffi.free_nonmovingbuffer(value, raw_cdata, chr(flag))
lltype.free(buffer, flavor='raw')
keepalive_until_here(args_w)
return w_res
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -83,7 +83,7 @@
raise oefmt(space.w_TypeError, "cannot initialize cdata '%s'",
self.name)
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
self.convert_from_object(cdata, w_ob)
return False
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -14,8 +14,8 @@
class W_CTypePtrOrArray(W_CType):
- _attrs_ = ['ctitem', 'can_cast_anything', 'length']
- _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length']
+ _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
+ _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
length = -1
def __init__(self, space, size, extra, extra_position, ctitem,
@@ -28,6 +28,9 @@
# - for functions, it is the return type
self.ctitem = ctitem
self.can_cast_anything = could_cast_anything and ctitem.cast_anything
+ self.accept_str = (self.can_cast_anything or
+ (ctitem.is_primitive_integer and
+ ctitem.size == rffi.sizeof(lltype.Char)))
def is_unichar_ptr_or_array(self):
return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar)
@@ -70,9 +73,7 @@
pass
else:
self._convert_array_from_listview(cdata, space.listview(w_ob))
- elif (self.can_cast_anything or
- (self.ctitem.is_primitive_integer and
- self.ctitem.size == rffi.sizeof(lltype.Char))):
+ elif self.accept_str:
if not space.isinstance_w(w_ob, space.w_str):
raise self._convert_error("str or list or tuple", w_ob)
s = space.str_w(w_ob)
@@ -260,8 +261,16 @@
else:
return lltype.nullptr(rffi.CCHARP.TO)
- def _prepare_pointer_call_argument(self, w_init, cdata):
+ def _prepare_pointer_call_argument(self, w_init, cdata, keepalives, i):
space = self.space
+ if self.accept_str and space.isinstance_w(w_init, space.w_str):
+ # special case to optimize strings passed to a "char *" argument
+ value = w_init.str_w(space)
+ keepalives[i] = value
+ buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
+ rffi.cast(rffi.CCHARPP, cdata)[0] = buf
+ return ord(buf_flag) # 4, 5 or 6
+ #
if (space.isinstance_w(w_init, space.w_list) or
space.isinstance_w(w_init, space.w_tuple)):
length = space.int_w(space.len(w_init))
@@ -297,10 +306,11 @@
rffi.cast(rffi.CCHARPP, cdata)[0] = result
return 1
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag
result = (not isinstance(w_ob, cdataobj.W_CData) and
- self._prepare_pointer_call_argument(w_ob, cdata))
+ self._prepare_pointer_call_argument(w_ob, cdata,
+ keepalives, i))
if result == 0:
self.convert_from_object(cdata, w_ob)
set_mustfree_flag(cdata, result)
diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py
--- a/pypy/module/_cffi_backend/ffi_obj.py
+++ b/pypy/module/_cffi_backend/ffi_obj.py
@@ -353,7 +353,7 @@
'array.array' or numpy arrays."""
#
w_ctchara = newtype._new_chara_type(self.space)
- return func.from_buffer(self.space, w_ctchara, w_python_buffer)
+ return func._from_buffer(self.space, w_ctchara, w_python_buffer)
@unwrap_spec(w_arg=W_CData)
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -1,7 +1,8 @@
from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw
-from rpython.rlib.objectmodel import keepalive_until_here
+from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated
+from rpython.rlib import jit
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
@@ -132,17 +133,66 @@
raise oefmt(space.w_TypeError,
"needs 'char[]', got '%s'", w_ctype.name)
#
+ return _from_buffer(space, w_ctype, w_x)
+
+def _from_buffer(space, w_ctype, w_x):
buf = _fetch_as_read_buffer(space, w_x)
- try:
- _cdata = buf.get_raw_address()
- except ValueError:
- raise oefmt(space.w_TypeError,
- "from_buffer() got a '%T' object, which supports the "
- "buffer interface but cannot be rendered as a plain "
- "raw address on PyPy", w_x)
+ if space.isinstance_w(w_x, space.w_str):
+ _cdata = get_raw_address_of_string(space, w_x)
+ else:
+ try:
+ _cdata = buf.get_raw_address()
+ except ValueError:
+ raise oefmt(space.w_TypeError,
+ "from_buffer() got a '%T' object, which supports the "
+ "buffer interface but cannot be rendered as a plain "
+ "raw address on PyPy", w_x)
#
return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x)
+# ____________________________________________________________
+
+class RawBytes(object):
+ def __init__(self, string):
+ self.ptr = rffi.str2charp(string, track_allocation=False)
+ def __del__(self):
+ rffi.free_charp(self.ptr, track_allocation=False)
+
+class RawBytesCache(object):
+ def __init__(self, space):
+ from pypy.interpreter.baseobjspace import W_Root
+ from rpython.rlib import rweakref
+ self.wdict = rweakref.RWeakKeyDictionary(W_Root, RawBytes)
+
+ at jit.dont_look_inside
+def get_raw_address_of_string(space, w_x):
+ """Special case for ffi.from_buffer(string). Returns a 'char *' that
+ is valid as long as the string object is alive. Two calls to
+ ffi.from_buffer(same_string) are guaranteed to return the same pointer.
+ """
+ from rpython.rtyper.annlowlevel import llstr
+ from rpython.rtyper.lltypesystem.rstr import STR
+ from rpython.rtyper.lltypesystem import llmemory
+ from rpython.rlib import rgc
+
+ cache = space.fromcache(RawBytesCache)
+ rawbytes = cache.wdict.get(w_x)
+ if rawbytes is None:
+ data = space.str_w(w_x)
+ if we_are_translated() and not rgc.can_move(data):
+ lldata = llstr(data)
+ data_start = (llmemory.cast_ptr_to_adr(lldata) +
+ rffi.offsetof(STR, 'chars') +
+ llmemory.itemoffsetof(STR.chars, 0))
+ data_start = rffi.cast(rffi.CCHARP, data_start)
+ data_start[len(data)] = '\x00' # write the final extra null
+ return data_start
+ rawbytes = RawBytes(data)
+ cache.wdict.set(w_x, rawbytes)
+ return rawbytes.ptr
+
+# ____________________________________________________________
+
def unsafe_escaping_ptr_for_ptr_or_array(w_cdata):
if not w_cdata.ctype.is_nonfunc_pointer_or_array:
diff --git a/pypy/module/_cffi_backend/parse_c_type.py b/pypy/module/_cffi_backend/parse_c_type.py
--- a/pypy/module/_cffi_backend/parse_c_type.py
+++ b/pypy/module/_cffi_backend/parse_c_type.py
@@ -97,11 +97,8 @@
[rffi.INT], rffi.CCHARP)
def parse_c_type(info, input):
- p_input = rffi.str2charp(input)
- try:
+ with rffi.scoped_view_charp(input) as p_input:
res = ll_parse_c_type(info, p_input)
- finally:
- rffi.free_charp(p_input)
return rffi.cast(lltype.Signed, res)
NULL_CTX = lltype.nullptr(PCTX.TO)
@@ -130,15 +127,13 @@
return rffi.getintfield(src_ctx, 'c_num_types')
def search_in_globals(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_globals(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_globals(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
def search_in_struct_unions(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_struct_unions(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_struct_unions(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -3330,13 +3330,18 @@
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
- py.test.raises(TypeError, from_buffer, BCharA, b"foo")
+ p1 = from_buffer(BCharA, b"foo")
+ assert p1 == from_buffer(BCharA, b"foo")
+ import gc; gc.collect()
+ assert p1 == from_buffer(BCharA, b"foo")
py.test.raises(TypeError, from_buffer, BCharA, u+"foo")
try:
from __builtin__ import buffer
except ImportError:
pass
else:
+ # from_buffer(buffer(b"foo")) does not work, because it's not
+ # implemented on pypy; only from_buffer(b"foo") works.
py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo"))
py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo"))
try:
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -401,21 +401,20 @@
_WriteFile, ERROR_NO_SYSTEM_RESOURCES)
from rpython.rlib import rwin32
- charp = rffi.str2charp(buf)
- written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
- flavor='raw')
- try:
- result = _WriteFile(
- self.handle, rffi.ptradd(charp, offset),
- size, written_ptr, rffi.NULL)
+ with rffi.scoped_view_charp(buf) as charp:
+ written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
+ flavor='raw')
+ try:
+ result = _WriteFile(
+ self.handle, rffi.ptradd(charp, offset),
+ size, written_ptr, rffi.NULL)
- if (result == 0 and
- rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
- raise oefmt(space.w_ValueError,
- "Cannot send %d bytes over connection", size)
- finally:
- rffi.free_charp(charp)
- lltype.free(written_ptr, flavor='raw')
+ if (result == 0 and
+ rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
+ raise oefmt(space.w_ValueError,
+ "Cannot send %d bytes over connection", size)
+ finally:
+ lltype.free(written_ptr, flavor='raw')
def do_recv_string(self, space, buflength, maxlength):
from pypy.module._multiprocessing.interp_win32 import (
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -135,7 +135,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
NPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
# set both server and client callbacks, because the context
@@ -147,7 +147,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def advertiseNPN_cb(s, data_ptr, len_ptr, args):
@@ -181,7 +181,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
ALPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
with rffi.scoped_str2charp(protos) as protos_buf:
@@ -193,7 +193,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def selectALPN_cb(s, out_ptr, outlen_ptr, client, client_len, args):
@@ -228,7 +228,7 @@
Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
bound on the entropy contained in string."""
- with rffi.scoped_str2charp(string) as buf:
+ with rffi.scoped_nonmovingbuffer(string) as buf:
libssl_RAND_add(buf, len(string), entropy)
def RAND_status(space):
diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py
--- a/pypy/module/cppyy/capi/builtin_capi.py
+++ b/pypy/module/cppyy/capi/builtin_capi.py
@@ -537,9 +537,8 @@
releasegil=ts_helper,
compilation_info=backend.eci)
def c_charp2stdstring(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2stdstring(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2stdstring(charp)
return result
_c_stdstring2stdstring = rffi.llexternal(
"cppyy_stdstring2stdstring",
diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py
--- a/pypy/module/cppyy/capi/cint_capi.py
+++ b/pypy/module/cppyy/capi/cint_capi.py
@@ -82,9 +82,8 @@
releasegil=ts_helper,
compilation_info=eci)
def c_charp2TString(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2TString(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2TString(charp)
return result
_c_TString2TString = rffi.llexternal(
"cppyy_TString2TString",
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -65,6 +65,7 @@
else: # only other use is string
n = len(obj._string)
assert raw_string == rffi.cast(rffi.CCHARP, 0)
+ # XXX could use rffi.get_nonmovingbuffer_final_null()
raw_string = rffi.str2charp(obj._string)
data = rffi.cast(rffi.CCHARPP, data)
data[0] = raw_string
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
--- a/rpython/jit/backend/arm/opassembler.py
+++ b/rpython/jit/backend/arm/opassembler.py
@@ -883,6 +883,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self._gen_address(resloc, baseloc, ofsloc, scale, ofs_items)
diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py
--- a/rpython/jit/backend/llsupport/descr.py
+++ b/rpython/jit/backend/llsupport/descr.py
@@ -280,7 +280,7 @@
concrete_type = '\x00'
def __init__(self, basesize, itemsize, lendescr, flag, is_pure=False, concrete_type='\x00'):
- self.basesize = basesize
+ self.basesize = basesize # this includes +1 for STR
self.itemsize = itemsize
self.lendescr = lendescr # or None, if no length
self.flag = flag
@@ -676,7 +676,7 @@
def unpack_arraydescr(arraydescr):
assert isinstance(arraydescr, ArrayDescr)
- ofs = arraydescr.basesize
+ ofs = arraydescr.basesize # this includes +1 for STR
size = arraydescr.itemsize
sign = arraydescr.is_item_signed()
return size, ofs, sign
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -293,6 +293,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1),
itemsize, itemsize, basesize, NOT_SIGNED)
elif opnum == rop.UNICODEGETITEM:
@@ -304,6 +305,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2),
itemsize, itemsize, basesize)
elif opnum == rop.UNICODESETITEM:
diff --git a/rpython/jit/backend/llsupport/symbolic.py b/rpython/jit/backend/llsupport/symbolic.py
--- a/rpython/jit/backend/llsupport/symbolic.py
+++ b/rpython/jit/backend/llsupport/symbolic.py
@@ -29,7 +29,7 @@
def get_array_token(T, translate_support_code):
# T can be an array or a var-sized structure
if translate_support_code:
- basesize = llmemory.sizeof(T, 0)
+ basesize = llmemory.sizeof(T, 0) # this includes +1 for STR
if isinstance(T, lltype.Struct):
SUBARRAY = getattr(T, T._arrayfld)
itemsize = llmemory.sizeof(SUBARRAY.OF)
@@ -57,6 +57,7 @@
assert carray.length.size == WORD
ofs_length = before_array_part + carray.length.offset
basesize = before_array_part + carray.items.offset
+ basesize += T._hints.get('extra_item_after_alloc', 0) # +1 for STR
carrayitem = ll2ctypes.get_ctypes_type(T.OF)
itemsize = ctypes.sizeof(carrayitem)
return basesize, itemsize, ofs_length
diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py
--- a/rpython/jit/backend/llsupport/test/test_descr.py
+++ b/rpython/jit/backend/llsupport/test/test_descr.py
@@ -435,8 +435,10 @@
def test_bytearray_descr():
c0 = GcCache(False)
descr = get_array_descr(c0, rstr.STR) # for bytearray
+ # note that we get a basesize that has 1 extra byte for the final null char
+ # (only for STR)
assert descr.flag == FLAG_UNSIGNED
- assert descr.basesize == struct.calcsize("PP") # hash, length
+ assert descr.basesize == struct.calcsize("PP") + 1 # hash, length, extra
assert descr.lendescr.offset == struct.calcsize("P") # hash
assert not descr.is_array_of_pointers()
diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py
--- a/rpython/jit/backend/llsupport/test/test_rewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_rewrite.py
@@ -647,6 +647,9 @@
""")
def test_rewrite_assembler_newstr_newunicode(self):
+ # note: strdescr.basesize already contains the extra final character,
+ # so that's why newstr(14) is rounded up to 'basesize+15' and not
+ # 'basesize+16'.
self.check_rewrite("""
[i2]
p0 = newstr(14)
@@ -657,12 +660,12 @@
""", """
[i2]
p0 = call_malloc_nursery( \
- %(strdescr.basesize + 16 * strdescr.itemsize + \
+ %(strdescr.basesize + 15 * strdescr.itemsize + \
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s)
gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s)
gc_store(p0, 0, 0, %(strhashdescr.field_size)s)
- p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
+ p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 15 * strdescr.itemsize)d)
gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s)
gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s)
gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s)
@@ -1240,14 +1243,14 @@
# 'i3 = gc_load_i(p0,i5,%(unicodedescr.itemsize)d)'],
[True, (4,), 'i3 = strgetitem(p0,i1)' '->'
'i3 = gc_load_indexed_i(p0,i1,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
#[False, (4,), 'i3 = strgetitem(p0,i1)' '->'
- # 'i5 = int_add(i1, %(strdescr.basesize)d);'
+ # 'i5 = int_add(i1, %(strdescr.basesize-1)d);'
# 'i3 = gc_load_i(p0,i5,1)'],
## setitem str/unicode
[True, (4,), 'i3 = strsetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
[True, (2,4), 'i3 = unicodesetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,'
'%(unicodedescr.itemsize)d,'
diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py
--- a/rpython/jit/backend/llsupport/test/ztranslation_test.py
+++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py
@@ -3,7 +3,7 @@
from rpython.rlib.jit import JitDriver, unroll_parameters, set_param
from rpython.rlib.jit import PARAMETERS, dont_look_inside
from rpython.rlib.jit import promote, _get_virtualizable_token
-from rpython.rlib import jit_hooks, rposix
+from rpython.rlib import jit_hooks, rposix, rgc
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField
from rpython.jit.backend.detect_cpu import getcpuclass
@@ -11,7 +11,7 @@
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.config.config import ConfigError
from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rlib.rjitlog import rjitlog as jl
@@ -29,6 +29,7 @@
# - floats neg and abs
# - cast_int_to_float
# - llexternal with macro=True
+ # - extra place for the zero after STR instances
class BasicFrame(object):
_virtualizable_ = ['i']
@@ -56,7 +57,7 @@
return ("/home.py",0,0)
jitdriver = JitDriver(greens = [],
- reds = ['total', 'frame', 'j'],
+ reds = ['total', 'frame', 'prev_s', 'j'],
virtualizables = ['frame'],
get_location = get_location)
def f(i, j):
@@ -68,9 +69,12 @@
total = 0
frame = Frame(i)
j = float(j)
+ prev_s = rstr.mallocstr(16)
while frame.i > 3:
- jitdriver.can_enter_jit(frame=frame, total=total, j=j)
- jitdriver.jit_merge_point(frame=frame, total=total, j=j)
+ jitdriver.can_enter_jit(frame=frame, total=total, j=j,
+ prev_s=prev_s)
+ jitdriver.jit_merge_point(frame=frame, total=total, j=j,
+ prev_s=prev_s)
_get_virtualizable_token(frame)
total += frame.i
if frame.i >= 20:
@@ -82,6 +86,11 @@
k = myabs1(myabs2(j))
if k - abs(j): raise ValueError
if k - abs(-j): raise ValueError
+ s = rstr.mallocstr(16)
+ rgc.ll_write_final_null_char(s)
+ rgc.ll_write_final_null_char(prev_s)
+ if (frame.i & 3) == 0:
+ prev_s = s
return chr(total % 253)
#
class Virt2(object):
diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py
--- a/rpython/jit/backend/ppc/opassembler.py
+++ b/rpython/jit/backend/ppc/opassembler.py
@@ -994,6 +994,7 @@
basesize, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
scale = 0
self._emit_load_for_copycontent(r.r0, src_ptr_loc, src_ofs_loc, scale)
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1673,25 +1673,6 @@
dest_addr = AddressLoc(base_loc, ofs_loc, scale, offset_loc.value)
self.save_into_mem(dest_addr, value_loc, size_loc)
- def genop_discard_strsetitem(self, op, arglocs):
- base_loc, ofs_loc, val_loc = arglocs
- basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR,
- self.cpu.translate_support_code)
- assert itemsize == 1
- dest_addr = AddressLoc(base_loc, ofs_loc, 0, basesize)
- self.mc.MOV8(dest_addr, val_loc.lowest8bits())
-
- def genop_discard_unicodesetitem(self, op, arglocs):
- base_loc, ofs_loc, val_loc = arglocs
- basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
- self.cpu.translate_support_code)
- if itemsize == 4:
- self.mc.MOV32(AddressLoc(base_loc, ofs_loc, 2, basesize), val_loc)
- elif itemsize == 2:
- self.mc.MOV16(AddressLoc(base_loc, ofs_loc, 1, basesize), val_loc)
- else:
- assert 0, itemsize
-
# genop_discard_setfield_raw = genop_discard_setfield_gc
def genop_math_read_timestamp(self, op, arglocs, resloc):
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -1219,6 +1219,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self.assembler.load_effective_addr(ofsloc, ofs_items, scale,
resloc, baseloc)
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -991,6 +991,7 @@
basesize, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
scale = 0
# src and src_len are tmp registers
diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py
--- a/rpython/jit/metainterp/test/test_virtualizable.py
+++ b/rpython/jit/metainterp/test/test_virtualizable.py
@@ -1381,7 +1381,7 @@
return result
def indirection(arg):
- return interp(arg)
+ return interp(arg) + 1
def run_interp(n):
f = hint(Frame(n), access_directly=True)
diff --git a/rpython/memory/gcheader.py b/rpython/memory/gcheader.py
--- a/rpython/memory/gcheader.py
+++ b/rpython/memory/gcheader.py
@@ -11,7 +11,21 @@
def __init__(self, HDR):
"""NOT_RPYTHON"""
self.HDR = HDR
- self.obj2header = weakref.WeakKeyDictionary()
+ #
+ # The following used to be a weakref.WeakKeyDictionary(), but
+ # the problem is that if you have a gcobj which has already a
+ # weakref cached on it and the hash already cached in that
+ # weakref, and later the hash of the gcobj changes (because it
+ # is ll2ctypes-ified), then that gcobj cannot be used as a key
+ # in a WeakKeyDictionary any more: from this point on,
+ # 'ref(gcobj)' and 'ref(gcobj, callback)' return two objects
+ # with different hashes... and so e.g. the sequence of
+ # operations 'obj2header[x]=y; assert x in obj2header' fails.
+ #
+ # Instead, just use a regular dictionary and hope that not too
+ # many objects would be reclaimed in a given GCHeaderBuilder
+ # instance.
+ self.obj2header = {}
self.size_gc_header = llmemory.GCHeaderOffset(self)
def header_of_object(self, gcptr):
diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py
--- a/rpython/rlib/objectmodel.py
+++ b/rpython/rlib/objectmodel.py
@@ -281,6 +281,10 @@
return lltype.Signed
malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0)
+_translated_to_c = CDefinedIntSymbolic('1 /*_translated_to_c*/', default=0)
+
+def we_are_translated_to_c():
+ return we_are_translated() and _translated_to_c
# ____________________________________________________________
diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py
--- a/rpython/rlib/rdtoa.py
+++ b/rpython/rlib/rdtoa.py
@@ -56,22 +56,24 @@
raise MemoryError
end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
try:
- ll_input = rffi.str2charp(input)
+ # note: don't use the class scoped_view_charp here, it
+ # breaks some tests because this function is used by the GC
+ ll_input, flag = rffi.get_nonmovingbuffer_final_null(input)
try:
result = dg_strtod(ll_input, end_ptr)
endpos = (rffi.cast(lltype.Signed, end_ptr[0]) -
rffi.cast(lltype.Signed, ll_input))
-
- if endpos == 0 or endpos < len(input):
- raise ValueError("invalid input at position %d" % (endpos,))
-
- return result
finally:
- rffi.free_charp(ll_input)
+ rffi.free_nonmovingbuffer(input, ll_input, flag)
finally:
lltype.free(end_ptr, flavor='raw')
+ if endpos == 0 or endpos < len(input):
+ raise ValueError("invalid input at position %d" % (endpos,))
+
+ return result
+
lower_special_strings = ['inf', '+inf', '-inf', 'nan']
upper_special_strings = ['INF', '+INF', '-INF', 'NAN']
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -1268,3 +1268,26 @@
ptr = lltype.direct_arrayitems(array)
# ptr is a Ptr(FixedSizeArray(Char, 1)). Cast it to a rffi.CCHARP
return rffi.cast(rffi.CCHARP, ptr)
+
+ at jit.dont_look_inside
+ at no_collect
+ at specialize.ll()
+def ll_write_final_null_char(s):
+ """'s' is a low-level STR; writes a terminating NULL character after
+ the other characters in 's'. Warning, this only works because of
+ the 'extra_item_after_alloc' hack inside the definition of STR.
+ """
+ from rpython.rtyper.lltypesystem import rffi
+ PSTR = lltype.typeOf(s)
+ assert has_final_null_char(PSTR) == 1
+ n = llmemory.offsetof(PSTR.TO, 'chars')
+ n += llmemory.itemoffsetof(PSTR.TO.chars, 0)
+ n = llmemory.raw_malloc_usage(n)
+ n += len(s.chars)
+ # no GC operation from here!
+ ptr = rffi.cast(rffi.CCHARP, s)
+ ptr[n] = '\x00'
+
+ at specialize.memo()
+def has_final_null_char(PSTR):
+ return PSTR.TO.chars._hints.get('extra_item_after_alloc', 0)
diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py
--- a/rpython/rtyper/lltypesystem/ll2ctypes.py
+++ b/rpython/rtyper/lltypesystem/ll2ctypes.py
@@ -250,7 +250,9 @@
if not A._hints.get('nolength'):
_fields_ = [('length', lentype),
- ('items', max_n * ctypes_item)]
+ ('items',
+ (max_n + A._hints.get('extra_item_after_alloc', 0))
+ * ctypes_item)]
else:
_fields_ = [('items', max_n * ctypes_item)]
@@ -695,6 +697,9 @@
# we have no clue, so we allow whatever index
return 0, maxint
+ def shrinklength(self, newlength):
+ raise NotImplementedError
+
def getitem(self, index, uninitialized_ok=False):
res = self._storage.contents._getitem(index, boundscheck=False)
if isinstance(self._TYPE.OF, lltype.ContainerType):
diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py
--- a/rpython/rtyper/lltypesystem/llmemory.py
+++ b/rpython/rtyper/lltypesystem/llmemory.py
@@ -304,8 +304,15 @@
return cast_ptr_to_adr(p)
def raw_memcopy(self, srcadr, dstadr):
- # should really copy the length field, but we can't
- pass
+ # copy the length field, if we can
+ srclen = srcadr.ptr._obj.getlength()
+ dstlen = dstadr.ptr._obj.getlength()
+ if dstlen != srclen:
+ assert dstlen > srclen, "can't increase the length"
+ # a decrease in length occurs in the GC tests when copying a STR:
+ # the copy is initially allocated with really one extra char,
+ # the 'extra_item_after_alloc', and must be fixed.
+ dstadr.ptr._obj.shrinklength(srclen)
class ArrayLengthOffset(AddressOffset):
@@ -390,11 +397,23 @@
else:
raise Exception("don't know how to take the size of a %r"%TYPE)
+ at specialize.memo()
+def extra_item_after_alloc(ARRAY):
+ assert isinstance(ARRAY, lltype.Array)
+ return ARRAY._hints.get('extra_item_after_alloc', 0)
+
@specialize.arg(0)
def sizeof(TYPE, n=None):
+ """Return the symbolic size of TYPE.
+ For a Struct with no varsized part, it must be called with n=None.
+ For an Array or a Struct with a varsized part, it is the number of items.
+ There is a special case to return 1 more than requested if the array
+ has the hint 'extra_item_after_alloc' set to 1.
+ """
if n is None:
return _sizeof_none(TYPE)
elif isinstance(TYPE, lltype.Array):
+ n += extra_item_after_alloc(TYPE)
return itemoffsetof(TYPE) + _sizeof_none(TYPE.OF) * n
else:
return _sizeof_int(TYPE, n)
@@ -1036,7 +1055,7 @@
_reccopy(subsrc, subdst)
else:
# this is a hack XXX de-hack this
- llvalue = source._obj.getitem(i, uninitialized_ok=True)
+ llvalue = source._obj.getitem(i, uninitialized_ok=2)
if not isinstance(llvalue, lltype._uninitialized):
dest._obj.setitem(i, llvalue)
elif isinstance(T, lltype.Struct):
diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py
--- a/rpython/rtyper/lltypesystem/lltype.py
+++ b/rpython/rtyper/lltypesystem/lltype.py
@@ -1926,14 +1926,29 @@
return 0, stop
def getitem(self, index, uninitialized_ok=False):
- v = self.items[index]
+ try:
+ v = self.items[index]
+ except IndexError:
+ if (index == len(self.items) and uninitialized_ok == 2 and
+ self._TYPE._hints.get('extra_item_after_alloc')):
+ # special case: reading the extra final char returns
+ # an uninitialized, if 'uninitialized_ok==2'
+ return _uninitialized(self._TYPE.OF)
+ raise
if isinstance(v, _uninitialized) and not uninitialized_ok:
raise UninitializedMemoryAccess("%r[%s]"%(self, index))
return v
def setitem(self, index, value):
assert typeOf(value) == self._TYPE.OF
- self.items[index] = value
+ try:
+ self.items[index] = value
+ except IndexError:
+ if (index == len(self.items) and value == '\x00' and
+ self._TYPE._hints.get('extra_item_after_alloc')):
+ # special case: writing NULL to the extra final char
+ return
+ raise
assert not '__dict__' in dir(_array)
assert not '__dict__' in dir(_struct)
diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py
--- a/rpython/rtyper/lltypesystem/rffi.py
+++ b/rpython/rtyper/lltypesystem/rffi.py
@@ -15,7 +15,7 @@
from rpython.rtyper.tool.rfficache import platform, sizeof_c_type
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.annlowlevel import llhelper
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, we_are_translated_to_c
from rpython.rlib.rstring import StringBuilder, UnicodeBuilder, assert_str0
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import llmemory
@@ -232,40 +232,36 @@
call_external_function = jit.dont_look_inside(
call_external_function)
+ def _oops():
+ raise AssertionError("can't pass (any more) a unicode string"
+ " directly to a VOIDP argument")
+ _oops._annspecialcase_ = 'specialize:memo'
+
unrolling_arg_tps = unrolling_iterable(enumerate(args))
def wrapper(*args):
real_args = ()
+ # XXX 'to_free' leaks if an allocation fails with MemoryError
+ # and was not the first in this function
to_free = ()
for i, TARGET in unrolling_arg_tps:
arg = args[i]
- freeme = None
- if TARGET == CCHARP:
+ if TARGET == CCHARP or TARGET is VOIDP:
if arg is None:
arg = lltype.nullptr(CCHARP.TO) # None => (char*)NULL
- freeme = arg
+ to_free = to_free + (arg, '\x04')
elif isinstance(arg, str):
- arg = str2charp(arg)
- # XXX leaks if a str2charp() fails with MemoryError
- # and was not the first in this function
- freeme = arg
+ tup = get_nonmovingbuffer_final_null(arg)
+ to_free = to_free + tup
+ arg = tup[0]
+ elif isinstance(arg, unicode):
+ _oops()
elif TARGET == CWCHARP:
if arg is None:
arg = lltype.nullptr(CWCHARP.TO) # None => (wchar_t*)NULL
- freeme = arg
+ to_free = to_free + (arg,)
elif isinstance(arg, unicode):
arg = unicode2wcharp(arg)
- # XXX leaks if a unicode2wcharp() fails with MemoryError
- # and was not the first in this function
- freeme = arg
- elif TARGET is VOIDP:
- if arg is None:
- arg = lltype.nullptr(VOIDP.TO)
- elif isinstance(arg, str):
- arg = str2charp(arg)
- freeme = arg
- elif isinstance(arg, unicode):
- arg = unicode2wcharp(arg)
- freeme = arg
+ to_free = to_free + (arg,)
elif _isfunctype(TARGET) and not _isllptr(arg):
# XXX pass additional arguments
use_gil = invoke_around_handlers
@@ -283,11 +279,22 @@
or TARGET is lltype.Bool)):
arg = cast(TARGET, arg)
real_args = real_args + (arg,)
- to_free = to_free + (freeme,)
res = call_external_function(*real_args)
for i, TARGET in unrolling_arg_tps:
- if to_free[i]:
- lltype.free(to_free[i], flavor='raw')
+ arg = args[i]
+ if TARGET == CCHARP or TARGET is VOIDP:
+ if arg is None:
+ to_free = to_free[2:]
+ elif isinstance(arg, str):
+ free_nonmovingbuffer(arg, to_free[0], to_free[1])
+ to_free = to_free[2:]
+ elif TARGET == CWCHARP:
+ if arg is None:
+ to_free = to_free[1:]
+ elif isinstance(arg, unicode):
+ free_wcharp(to_free[0])
+ to_free = to_free[1:]
+ assert len(to_free) == 0
if rarithmetic.r_int is not r_int:
if result is INT:
return cast(lltype.Signed, res)
@@ -816,52 +823,69 @@
string is already nonmovable or could be pinned. Must be followed by a
free_nonmovingbuffer call.
- First bool returned indicates if 'data' was pinned. Second bool returned
- indicates if we did a raw alloc because pinning failed. Both bools
- should never be true at the same time.
+ Also returns a char:
+ * \4: no pinning, returned pointer is inside 'data' which is nonmovable
+ * \5: 'data' was pinned, returned pointer is inside
+ * \6: pinning failed, returned pointer is raw malloced
+
+ For strings (not unicodes), the len()th character of the resulting
+ raw buffer is available, but not initialized. Use
+ get_nonmovingbuffer_final_null() instead of get_nonmovingbuffer()
+ to get a regular null-terminated "char *".
"""
lldata = llstrtype(data)
count = len(data)
- pinned = False
- if rgc.can_move(data):
- if rgc.pin(data):
- pinned = True
+ if we_are_translated_to_c() and not rgc.can_move(data):
+ flag = '\x04'
+ else:
+ if we_are_translated_to_c() and rgc.pin(data):
+ flag = '\x05'
else:
- buf = lltype.malloc(TYPEP.TO, count, flavor='raw')
+ buf = lltype.malloc(TYPEP.TO, count + (TYPEP is CCHARP),
+ flavor='raw')
copy_string_to_raw(lldata, buf, 0, count)
- return buf, pinned, True
+ return buf, '\x06'
# ^^^ raw malloc used to get a nonmovable copy
#
- # following code is executed if:
+ # following code is executed after we're translated to C, if:
# - rgc.can_move(data) and rgc.pin(data) both returned true
# - rgc.can_move(data) returned false
data_start = cast_ptr_to_adr(lldata) + \
offsetof(STRTYPE, 'chars') + itemoffsetof(STRTYPE.chars, 0)
- return cast(TYPEP, data_start), pinned, False
+ return cast(TYPEP, data_start), flag
# ^^^ already nonmovable. Therefore it's not raw allocated nor
# pinned.
get_nonmovingbuffer._always_inline_ = 'try' # get rid of the returned tuple
get_nonmovingbuffer._annenforceargs_ = [strtype]
- # (str, char*, bool, bool) -> None
+ @jit.dont_look_inside
+ def get_nonmovingbuffer_final_null(data):
+ tup = get_nonmovingbuffer(data)
+ buf, flag = tup
+ buf[len(data)] = lastchar
+ return tup
+ get_nonmovingbuffer_final_null._always_inline_ = 'try'
+ get_nonmovingbuffer_final_null._annenforceargs_ = [strtype]
+
+ # (str, char*, char) -> None
# Can't inline this because of the raw address manipulation.
@jit.dont_look_inside
- def free_nonmovingbuffer(data, buf, is_pinned, is_raw):
+ def free_nonmovingbuffer(data, buf, flag):
"""
- Keep 'data' alive and unpin it if it was pinned ('is_pinned' is true).
- Otherwise free the non-moving copy ('is_raw' is true).
+ Keep 'data' alive and unpin it if it was pinned (flag==\5).
+ Otherwise free the non-moving copy (flag==\6).
"""
- if is_pinned:
+ if flag == '\x05':
rgc.unpin(data)
- if is_raw:
+ if flag == '\x06':
lltype.free(buf, flavor='raw')
- # if is_pinned and is_raw are false: data was already nonmovable,
+ # if flag == '\x04': data was already nonmovable,
# we have nothing to clean up
keepalive_until_here(data)
- free_nonmovingbuffer._annenforceargs_ = [strtype, None, bool, bool]
+ free_nonmovingbuffer._annenforceargs_ = [strtype, None, None]
# int -> (char*, str, int)
# Can't inline this because of the raw address manipulation.
@@ -947,18 +971,19 @@
return (str2charp, free_charp, charp2str,
get_nonmovingbuffer, free_nonmovingbuffer,
+ get_nonmovingbuffer_final_null,
alloc_buffer, str_from_buffer, keep_buffer_alive_until_here,
charp2strn, charpsize2str, str2chararray, str2rawmem,
)
(str2charp, free_charp, charp2str,
- get_nonmovingbuffer, free_nonmovingbuffer,
+ get_nonmovingbuffer, free_nonmovingbuffer, get_nonmovingbuffer_final_null,
alloc_buffer, str_from_buffer, keep_buffer_alive_until_here,
charp2strn, charpsize2str, str2chararray, str2rawmem,
) = make_string_mappings(str)
(unicode2wcharp, free_wcharp, wcharp2unicode,
- get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer,
+ get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer, __not_usable,
alloc_unicodebuffer, unicode_from_buffer, keep_unicodebuffer_alive_until_here,
wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, unicode2rawmem,
) = make_string_mappings(unicode)
@@ -1194,10 +1219,28 @@
def __init__(self, data):
self.data = data
def __enter__(self):
- self.buf, self.pinned, self.is_raw = get_nonmovingbuffer(self.data)
+ self.buf, self.flag = get_nonmovingbuffer(self.data)
return self.buf
def __exit__(self, *args):
- free_nonmovingbuffer(self.data, self.buf, self.pinned, self.is_raw)
+ free_nonmovingbuffer(self.data, self.buf, self.flag)
+ __init__._always_inline_ = 'try'
+ __enter__._always_inline_ = 'try'
+ __exit__._always_inline_ = 'try'
+
+class scoped_view_charp:
+ """Returns a 'char *' that (tries to) point inside the given RPython
+ string (which must not be None). You can replace scoped_str2charp()
+ with scoped_view_charp() in all places that guarantee that the
+ content of the 'char[]' array will not be modified.
+ """
+ def __init__(self, data):
+ self.data = data
+ __init__._annenforceargs_ = [None, annmodel.SomeString(can_be_None=False)]
+ def __enter__(self):
+ self.buf, self.flag = get_nonmovingbuffer_final_null(self.data)
+ return self.buf
+ def __exit__(self, *args):
+ free_nonmovingbuffer(self.data, self.buf, self.flag)
__init__._always_inline_ = 'try'
__enter__._always_inline_ = 'try'
__exit__._always_inline_ = 'try'
@@ -1206,10 +1249,10 @@
def __init__(self, data):
self.data = data
def __enter__(self):
- self.buf, self.pinned, self.is_raw = get_nonmoving_unicodebuffer(self.data)
+ self.buf, self.flag = get_nonmoving_unicodebuffer(self.data)
return self.buf
def __exit__(self, *args):
- free_nonmoving_unicodebuffer(self.data, self.buf, self.pinned, self.is_raw)
+ free_nonmoving_unicodebuffer(self.data, self.buf, self.flag)
__init__._always_inline_ = 'try'
__enter__._always_inline_ = 'try'
__exit__._always_inline_ = 'try'
diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py
--- a/rpython/rtyper/lltypesystem/rstr.py
+++ b/rpython/rtyper/lltypesystem/rstr.py
@@ -1238,7 +1238,8 @@
# ____________________________________________________________
STR.become(GcStruct('rpy_string', ('hash', Signed),
- ('chars', Array(Char, hints={'immutable': True})),
+ ('chars', Array(Char, hints={'immutable': True,
+ 'extra_item_after_alloc': 1})),
adtmeths={'malloc' : staticAdtMethod(mallocstr),
'empty' : staticAdtMethod(emptystrfun),
'copy_contents' : staticAdtMethod(copy_string_contents),
diff --git a/rpython/rtyper/lltypesystem/test/test_rffi.py b/rpython/rtyper/lltypesystem/test/test_rffi.py
--- a/rpython/rtyper/lltypesystem/test/test_rffi.py
+++ b/rpython/rtyper/lltypesystem/test/test_rffi.py
@@ -516,7 +516,7 @@
def test_nonmovingbuffer(self):
d = 'some cool data that should not move'
def f():
- buf, is_pinned, is_raw = get_nonmovingbuffer(d)
+ buf, flag = get_nonmovingbuffer(d)
try:
counter = 0
for i in range(len(d)):
@@ -524,7 +524,7 @@
counter += 1
return counter
finally:
- free_nonmovingbuffer(d, buf, is_pinned, is_raw)
+ free_nonmovingbuffer(d, buf, flag)
assert f() == len(d)
fn = self.compile(f, [], gcpolicy='ref')
assert fn() == len(d)
@@ -534,13 +534,13 @@
def f():
counter = 0
for n in range(32):
- buf, is_pinned, is_raw = get_nonmovingbuffer(d)
+ buf, flag = get_nonmovingbuffer(d)
try:
for i in range(len(d)):
if buf[i] == d[i]:
counter += 1
finally:
- free_nonmovingbuffer(d, buf, is_pinned, is_raw)
+ free_nonmovingbuffer(d, buf, flag)
return counter
fn = self.compile(f, [], gcpolicy='semispace')
# The semispace gc uses raw_malloc for its internal data structs
@@ -555,13 +555,13 @@
def f():
counter = 0
for n in range(32):
- buf, is_pinned, is_raw = get_nonmovingbuffer(d)
+ buf, flag = get_nonmovingbuffer(d)
try:
for i in range(len(d)):
if buf[i] == d[i]:
counter += 1
finally:
- free_nonmovingbuffer(d, buf, is_pinned, is_raw)
+ free_nonmovingbuffer(d, buf, flag)
return counter
fn = self.compile(f, [], gcpolicy='incminimark')
# The incminimark gc uses raw_malloc for its internal data structs
@@ -835,3 +835,11 @@
if hasattr(rffi, '__INT128_T'):
value = 0xAAAABBBBCCCCDDDD
assert cast(rffi.__INT128_T, r_uint64(value)) == value
+
+def test_scoped_view_charp():
+ s = 'bar'
+ with scoped_view_charp(s) as buf:
+ assert buf[0] == 'b'
+ assert buf[1] == 'a'
+ assert buf[2] == 'r'
+ assert buf[3] == '\x00'
diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py
--- a/rpython/translator/c/node.py
+++ b/rpython/translator/c/node.py
@@ -253,8 +253,11 @@
yield '\t' + cdecl(typename, fname) + ';'
if not self.ARRAY._hints.get('nolength', False):
yield '\tlong length;'
+ varlength = self.varlength
+ if varlength is not None:
+ varlength += self.ARRAY._hints.get('extra_item_after_alloc', 0)
line = '%s;' % cdecl(self.itemtypename,
- 'items[%s]' % deflength(self.varlength))
+ 'items[%s]' % deflength(varlength))
if self.ARRAY.OF is Void: # strange
line = '/* array of void */'
if self.ARRAY._hints.get('nolength', False):
diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py
--- a/rpython/translator/c/test/test_lltyped.py
+++ b/rpython/translator/c/test/test_lltyped.py
@@ -1,4 +1,4 @@
-import py
+import py, random
from rpython.rtyper.lltypesystem.lltype import *
from rpython.rtyper.lltypesystem import rffi
from rpython.translator.c.test.test_genc import compile
@@ -255,28 +255,6 @@
res2 = fn(0)
assert res1 == res2
- def test_null_padding(self):
- py.test.skip("we no longer pad our RPython strings with a final NUL")
- from rpython.rtyper.lltypesystem import llmemory
- from rpython.rtyper.lltypesystem import rstr
- chars_offset = llmemory.FieldOffset(rstr.STR, 'chars') + \
- llmemory.ArrayItemsOffset(rstr.STR.chars)
- # sadly, there's no way of forcing this to fail if the strings
- # are allocated in a region of memory such that they just
- # happen to get a NUL byte anyway :/ (a debug build will
- # always fail though)
- def trailing_byte(s):
- adr_s = llmemory.cast_ptr_to_adr(s)
- return (adr_s + chars_offset).char[len(s)]
- def f(x):
- r = 0
- for i in range(x):
- r += ord(trailing_byte(' '*(100-x*x)))
- return r
- fn = self.getcompiled(f, [int])
- res = fn(10)
- assert res == 0
-
def test_cast_primitive(self):
def f(x):
x = cast_primitive(UnsignedLongLong, x)
@@ -1023,3 +1001,49 @@
assert fn(r_longlong(1)) == True
assert fn(r_longlong(256)) == True
assert fn(r_longlong(2**32)) == True
+
+ def test_extra_item_after_alloc(self):
+ from rpython.rlib import rgc
+ from rpython.rtyper.lltypesystem import lltype
+ from rpython.rtyper.lltypesystem import rstr
+ # all STR objects should be allocated with enough space for one
+ # extra char. Check this for prebuilt strings, and for dynamically
+ # allocated ones with the default GC for tests. Use strings of 8,
+ # 16 and 24 chars because if the extra char is missing, writing to it
+ # is likely to cause corruption in nearby structures.
+ sizes = [random.choice([8, 16, 24]) for i in range(100)]
+ A = lltype.Struct('A', ('x', lltype.Signed))
+ prebuilt = [(rstr.mallocstr(sz),
+ lltype.malloc(A, flavor='raw', immortal=True))
+ for sz in sizes]
+ k = 0
+ for i, (s, a) in enumerate(prebuilt):
+ a.x = i
+ for i in range(len(s.chars)):
+ k += 1
+ if k == 256:
+ k = 1
+ s.chars[i] = chr(k)
+
+ def check(lst):
+ hashes = []
+ for i, (s, a) in enumerate(lst):
+ assert a.x == i
+ rgc.ll_write_final_null_char(s)
+ for i, (s, a) in enumerate(lst):
+ assert a.x == i # check it was not overwritten
+ def f():
+ check(prebuilt)
+ lst1 = []
+ for i, sz in enumerate(sizes):
+ s = rstr.mallocstr(sz)
+ a = lltype.malloc(A, flavor='raw')
+ a.x = i
+ lst1.append((s, a))
+ check(lst1)
+ for _, a in lst1:
+ lltype.free(a, flavor='raw')
+ return 42
+
+ fn = self.getcompiled(f, [])
+ assert fn() == 42
diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py
--- a/rpython/translator/c/test/test_newgc.py
+++ b/rpython/translator/c/test/test_newgc.py
@@ -3,6 +3,7 @@
import os
import sys
import subprocess
+import random
import py
@@ -1468,6 +1469,52 @@
res = self.run('nursery_hash_base')
assert res >= 195
+ def define_extra_item_after_alloc(cls):
+ from rpython.rtyper.lltypesystem import rstr
+ # all STR objects should be allocated with enough space for
+ # one extra char. Check this with our GCs. Use strings of 8,
+ # 16 and 24 chars because if the extra char is missing,
+ # writing to it is likely to cause corruption in nearby
+ # structures.
+ sizes = [random.choice([8, 16, 24]) for i in range(100)]
+ A = lltype.Struct('A', ('x', lltype.Signed))
+ prebuilt = [(rstr.mallocstr(sz),
+ lltype.malloc(A, flavor='raw', immortal=True))
+ for sz in sizes]
+ k = 0
+ for i, (s, a) in enumerate(prebuilt):
+ a.x = i
+ for i in range(len(s.chars)):
+ k += 1
+ if k == 256:
+ k = 1
+ s.chars[i] = chr(k)
+
+ def check(lst):
+ hashes = []
+ for i, (s, a) in enumerate(lst):
+ assert a.x == i
+ rgc.ll_write_final_null_char(s)
+ for i, (s, a) in enumerate(lst):
+ assert a.x == i # check it was not overwritten
+ def fn():
+ check(prebuilt)
+ lst1 = []
+ for i, sz in enumerate(sizes):
+ s = rstr.mallocstr(sz)
+ a = lltype.malloc(A, flavor='raw')
+ a.x = i
+ lst1.append((s, a))
+ check(lst1)
+ for _, a in lst1:
+ lltype.free(a, flavor='raw')
+ return 42
+ return fn
+
+ def test_extra_item_after_alloc(self):
+ res = self.run('extra_item_after_alloc')
+ assert res == 42
+
class TestGenerationalGC(TestSemiSpaceGC):
gcpolicy = "generation"
diff --git a/rpython/translator/tool/test/test_staticsizereport.py b/rpython/translator/tool/test/test_staticsizereport.py
--- a/rpython/translator/tool/test/test_staticsizereport.py
+++ b/rpython/translator/tool/test/test_staticsizereport.py
@@ -67,7 +67,7 @@
(4 * S + 2 * P) + # struct dicttable
(S + 2 * 8192) + # indexes, length 8192, rffi.USHORT
(S + (S + S) * 3840) + # entries, length 3840
- (S + S + 5) * 3840) # 3840 strings with 5 chars each
+ (S + S + 6) * 3840) # 3840 strings with 5 chars each (+1 final)
assert guess_size(func.builder.db, fixarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(lltype.Signed)
assert guess_size(func.builder.db, dynarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 2 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(rffi.VOIDP)
From pypy.commits at gmail.com Tue Aug 2 12:11:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 09:11:42 -0700 (PDT)
Subject: [pypy-commit] cffi default: The null_byte_after_str branch of PyPy
makes ffi.from_buffer(str)
Message-ID: <57a0c63e.65efc20a.6e316.2043@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2731:a54b242f428f
Date: 2016-08-02 18:13 +0200
http://bitbucket.org/cffi/cffi/changeset/a54b242f428f/
Log: The null_byte_after_str branch of PyPy makes ffi.from_buffer(str)
possible.
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c
--- a/c/_cffi_backend.c
+++ b/c/_cffi_backend.c
@@ -5974,6 +5974,11 @@
static int invalid_input_buffer_type(PyObject *x)
{
+ /* From PyPy 5.4, from_buffer() accepts strings (but still not buffers
+ or memoryviews on strings). */
+ if (PyBytes_Check(x))
+ return 0;
+
#if PY_MAJOR_VERSION < 3
if (PyBuffer_Check(x)) {
/* XXX fish fish fish in an inofficial way */
diff --git a/c/test_c.py b/c/test_c.py
--- a/c/test_c.py
+++ b/c/test_c.py
@@ -3341,13 +3341,18 @@
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
- py.test.raises(TypeError, from_buffer, BCharA, b"foo")
+ p1 = from_buffer(BCharA, b"foo")
+ assert p1 == from_buffer(BCharA, b"foo")
+ import gc; gc.collect()
+ assert p1 == from_buffer(BCharA, b"foo")
py.test.raises(TypeError, from_buffer, BCharA, u+"foo")
try:
from __builtin__ import buffer
except ImportError:
pass
else:
+ # from_buffer(buffer(b"foo")) does not work, because it's not
+ # implemented on pypy; only from_buffer(b"foo") works.
py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo"))
py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo"))
try:
diff --git a/doc/source/ref.rst b/doc/source/ref.rst
--- a/doc/source/ref.rst
+++ b/doc/source/ref.rst
@@ -171,7 +171,7 @@
buffer interface. This is the opposite of ``ffi.buffer()``. It gives
a reference to the existing data, not a copy; for this
reason, and for PyPy compatibility, it does not work with the built-in
-types str or unicode (or buffers/memoryviews on them).
+type unicode; nor buffers/memoryviews to byte or unicode strings.
It is meant to be used on objects
containing large quantities of raw data, like bytearrays
or ``array.array`` or numpy
@@ -193,6 +193,9 @@
method is called), then the ```` object will point to freed
memory and must not be used any more.
+*New in version 1.8:* the python_buffer can be a byte string (but still
+not a buffer/memoryview on a string).
+
ffi.memmove()
+++++++++++++
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -3,6 +3,13 @@
======================
+v1.8
+====
+
+* Removed the restriction that ``ffi.from_buffer()`` cannot be used on
+ byte strings (PyPy was improved and can now support that case).
+
+
v1.7
====
From pypy.commits at gmail.com Tue Aug 2 12:13:10 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 09:13:10 -0700 (PDT)
Subject: [pypy-commit] pypy default: Document branch
Message-ID: <57a0c696.82ddc20a.5e0f1.23d5@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r85992:7d9e19e2e836
Date: 2016-08-02 18:15 +0200
http://bitbucket.org/pypy/pypy/changeset/7d9e19e2e836/
Log: Document branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -109,3 +109,11 @@
.. branch: jitlog-exact-source-lines
Log exact line positions in debug merge points.
+
+.. branch: null_byte_after_str
+
+Allocate all RPython strings with one extra byte, normally unused.
+It is used to hold a final zero in case we need some ``char *``
+representation of the string, together with checks like ``not
+can_move()`` or object pinning. Main new thing that this allows:
+``ffi.from_buffer(string)``.
From pypy.commits at gmail.com Tue Aug 2 13:10:43 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 02 Aug 2016 10:10:43 -0700 (PDT)
Subject: [pypy-commit] pypy default: More documentation
Message-ID: <57a0d413.041f1c0a.8b27f.f274@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r85993:fe9b1fb401fa
Date: 2016-08-02 19:12 +0200
http://bitbucket.org/pypy/pypy/changeset/fe9b1fb401fa/
Log: More documentation
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -116,4 +116,6 @@
It is used to hold a final zero in case we need some ``char *``
representation of the string, together with checks like ``not
can_move()`` or object pinning. Main new thing that this allows:
-``ffi.from_buffer(string)``.
+``ffi.from_buffer(string)`` in CFFI. Additionally, and most
+importantly, CFFI calls that take directly a string as argument don't
+copy the string any more---this is like CFFI on CPython.
From pypy.commits at gmail.com Tue Aug 2 15:08:23 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 02 Aug 2016 12:08:23 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Create a real dict using the
ClassDictStrategy in W_TypeObject.getdict()
Message-ID: <57a0efa7.c4ebc20a.86c3a.5a66@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r85994:b7e537a214fd
Date: 2016-08-02 20:01 +0100
http://bitbucket.org/pypy/pypy/changeset/b7e537a214fd/
Log: Create a real dict using the ClassDictStrategy in
W_TypeObject.getdict()
* DictProxyStrategy renamed to ClassDictStrategy
* dictproxyobject.py renamed to classdict.py
Note: in CPython, type.__dict__ returns a hidden mutable dict
opaquely wrapped by a read-only mappingproxy object. This commit
creates the former.
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/classdict.py
rename from pypy/objspace/std/dictproxyobject.py
rename to pypy/objspace/std/classdict.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/classdict.py
@@ -38,7 +38,7 @@
)
-class DictProxyStrategy(DictStrategy):
+class ClassDictStrategy(DictStrategy):
"""Exposes a W_TypeObject.dict_w at app-level.
Uses getdictvalue() and setdictvalue() to access items.
@@ -142,7 +142,7 @@
# keys are utf-8 encoded identifiers from type's dict_w
return space.wrap(key.decode('utf-8'))
-create_iterator_classes(DictProxyStrategy)
+create_iterator_classes(ClassDictStrategy)
class MappingProxyStrategy(DictStrategy):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -484,13 +484,13 @@
del self.lazyloaders
def getdict(self, space): # returning a dict-proxy!
- from pypy.objspace.std.dictproxyobject import DictProxyStrategy
- from pypy.objspace.std.dictproxyobject import W_DictProxyObject
+ from pypy.objspace.std.classdict import ClassDictStrategy
+ from pypy.objspace.std.dictmultiobject import W_DictObject
if self.lazyloaders:
self._cleanup_() # force un-lazification
- strategy = space.fromcache(DictProxyStrategy)
+ strategy = space.fromcache(ClassDictStrategy)
storage = strategy.erase(self)
- return W_DictProxyObject(space, strategy, storage)
+ return W_DictObject(space, strategy, storage)
def is_heaptype(self):
return self.flag_heaptype
From pypy.commits at gmail.com Tue Aug 2 15:51:39 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Tue, 02 Aug 2016 12:51:39 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Add parser and syntax tests for
async and await, remove set_sentinel comment
Message-ID: <57a0f9cb.271ac20a.e6d8c.6887@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r85995:cf642e39ca7f
Date: 2016-08-02 21:51 +0200
http://bitbucket.org/pypy/pypy/changeset/cf642e39ca7f/
Log: Add parser and syntax tests for async and await, remove set_sentinel
comment
diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py
--- a/pypy/interpreter/pyparser/test/test_pyparse.py
+++ b/pypy/interpreter/pyparser/test/test_pyparse.py
@@ -169,8 +169,15 @@
py.test.raises(SyntaxError, self.parse, 'x = 5 # comment\nx = 6\n', "single")
def test_async_await(self):
- self.parse("async def coro(): pass")
- self.parse("await result = func()")
+ self.parse("async def coro(): await func")
+ #Test as var and func name
+ self.parse("async = 1")
+ self.parse("await = 1")
+ self.parse("def async(): pass")
+ #async for
+ self.parse("async def foo(): async for a in b: pass")
+ #async with
+ self.parse("async def foo(): async with a: pass")
class TestPythonParserWithSpace:
diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py
--- a/pypy/interpreter/test/test_syntax.py
+++ b/pypy/interpreter/test/test_syntax.py
@@ -99,7 +99,13 @@
async def foo():
await await fut
-
+
+ await x
+
+ def foo(): async for a in b: pass
+
+ def foo(): async with a: pass
+
""")
diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py
--- a/pypy/module/thread/os_lock.py
+++ b/pypy/module/thread/os_lock.py
@@ -147,7 +147,6 @@
def set_sentinel(space):
"""Set a sentinel lock that will be released when the current thread
state is finalized (after it is untied from the interpreter)."""
- #NOT IMPLEMENTED YET!!! (required for some libs to work)
return space.wrap(Lock(space))
class W_RLock(W_Root):
From pypy.commits at gmail.com Tue Aug 2 18:03:17 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 02 Aug 2016 15:03:17 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Merged in
marky1991/pypy_new/py3k-finish_time (pull request #465)
Message-ID: <57a118a5.10a81c0a.7b92a.554d@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r85997:b5091dc062fd
Date: 2016-08-02 23:02 +0100
http://bitbucket.org/pypy/pypy/changeset/b5091dc062fd/
Log: Merged in marky1991/pypy_new/py3k-finish_time (pull request #465)
Make all tests pass for the time module.
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -541,6 +541,8 @@
t_ref = lltype.malloc(rffi.TIME_TP.TO, 1, flavor='raw')
t_ref[0] = tt
pbuf = c_localtime(t_ref)
+ rffi.setintfield(pbuf, "c_tm_year",
+ rffi.getintfield(pbuf, "c_tm_year") + 1900)
lltype.free(t_ref, flavor='raw')
if not pbuf:
raise OperationError(space.w_ValueError,
@@ -584,7 +586,7 @@
if rffi.getintfield(glob_buf, 'c_tm_wday') < -1:
raise oefmt(space.w_ValueError, "day of week out of range")
- rffi.setintfield(glob_buf, 'c_tm_year', y - 1900)
+ rffi.setintfield(glob_buf, 'c_tm_year', y)
rffi.setintfield(glob_buf, 'c_tm_mon',
rffi.getintfield(glob_buf, 'c_tm_mon') - 1)
rffi.setintfield(glob_buf, 'c_tm_wday',
@@ -648,7 +650,8 @@
t_ref[0] = seconds
p = c_localtime(t_ref)
if not p:
- raise oefmt(space.w_ValueError, "unconvertible time")
+ raise oefmt(space.w_OSError, "unconvertible time")
+ rffi.setintfield(p, "c_tm_year", rffi.getintfield(p, "c_tm_year") + 1900)
return _asctime(space, p)
# by now w_tup is an optional argument (and not *args)
@@ -677,7 +680,7 @@
w(getif(t_ref, 'c_tm_hour')),
w(getif(t_ref, 'c_tm_min')),
w(getif(t_ref, 'c_tm_sec')),
- w(getif(t_ref, 'c_tm_year') + 1900)]
+ w(getif(t_ref, 'c_tm_year'))]
return space.mod(w("%.3s %.3s%3d %.2d:%.2d:%.2d %d"),
space.newtuple(args))
@@ -715,7 +718,7 @@
lltype.free(t_ref, flavor='raw')
if not p:
- raise OperationError(space.w_ValueError, space.wrap(_get_error_msg()))
+ raise OperationError(space.w_OSError, space.wrap(_get_error_msg()))
return _tm_to_tuple(space, p)
def mktime(space, w_tup):
@@ -725,6 +728,7 @@
buf = _gettmarg(space, w_tup, allowNone=False)
rffi.setintfield(buf, "c_tm_wday", -1)
+ rffi.setintfield(buf, "c_tm_year", rffi.getintfield(buf, "c_tm_year") - 1900)
tt = c_mktime(buf)
# A return value of -1 does not necessarily mean an error, but tm_wday
# cannot remain set to -1 if mktime succeeds.
@@ -801,6 +805,8 @@
rffi.setintfield(buf_value, 'c_tm_isdst', -1)
elif rffi.getintfield(buf_value, 'c_tm_isdst') > 1:
rffi.setintfield(buf_value, 'c_tm_isdst', 1)
+ rffi.setintfield(buf_value, "c_tm_year",
+ rffi.getintfield(buf_value, "c_tm_year") - 1900)
if _WIN:
# check that the format string contains only valid directives
From pypy.commits at gmail.com Tue Aug 2 18:03:28 2016
From: pypy.commits at gmail.com (marky1991)
Date: Tue, 02 Aug 2016 15:03:28 -0700 (PDT)
Subject: [pypy-commit] pypy py3k-finish_time: Make all tests pass for the
time module. lib-python/test_time.py still fails test_mktime_error when run
untranslated, but this test is skipped when run translated anyway.
Message-ID: <57a118b0.28eac20a.8f95a.922a@mx.google.com>
Author: Mark Young
Branch: py3k-finish_time
Changeset: r85996:80df0bb39be6
Date: 2016-07-17 23:51 -0400
http://bitbucket.org/pypy/pypy/changeset/80df0bb39be6/
Log: Make all tests pass for the time module. lib-python/test_time.py
still fails test_mktime_error when run untranslated, but this test
is skipped when run translated anyway.
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -541,6 +541,8 @@
t_ref = lltype.malloc(rffi.TIME_TP.TO, 1, flavor='raw')
t_ref[0] = tt
pbuf = c_localtime(t_ref)
+ rffi.setintfield(pbuf, "c_tm_year",
+ rffi.getintfield(pbuf, "c_tm_year") + 1900)
lltype.free(t_ref, flavor='raw')
if not pbuf:
raise OperationError(space.w_ValueError,
@@ -584,7 +586,7 @@
if rffi.getintfield(glob_buf, 'c_tm_wday') < -1:
raise oefmt(space.w_ValueError, "day of week out of range")
- rffi.setintfield(glob_buf, 'c_tm_year', y - 1900)
+ rffi.setintfield(glob_buf, 'c_tm_year', y)
rffi.setintfield(glob_buf, 'c_tm_mon',
rffi.getintfield(glob_buf, 'c_tm_mon') - 1)
rffi.setintfield(glob_buf, 'c_tm_wday',
@@ -648,7 +650,8 @@
t_ref[0] = seconds
p = c_localtime(t_ref)
if not p:
- raise oefmt(space.w_ValueError, "unconvertible time")
+ raise oefmt(space.w_OSError, "unconvertible time")
+ rffi.setintfield(p, "c_tm_year", rffi.getintfield(p, "c_tm_year") + 1900)
return _asctime(space, p)
# by now w_tup is an optional argument (and not *args)
@@ -677,7 +680,7 @@
w(getif(t_ref, 'c_tm_hour')),
w(getif(t_ref, 'c_tm_min')),
w(getif(t_ref, 'c_tm_sec')),
- w(getif(t_ref, 'c_tm_year') + 1900)]
+ w(getif(t_ref, 'c_tm_year'))]
return space.mod(w("%.3s %.3s%3d %.2d:%.2d:%.2d %d"),
space.newtuple(args))
@@ -715,7 +718,7 @@
lltype.free(t_ref, flavor='raw')
if not p:
- raise OperationError(space.w_ValueError, space.wrap(_get_error_msg()))
+ raise OperationError(space.w_OSError, space.wrap(_get_error_msg()))
return _tm_to_tuple(space, p)
def mktime(space, w_tup):
@@ -725,6 +728,7 @@
buf = _gettmarg(space, w_tup, allowNone=False)
rffi.setintfield(buf, "c_tm_wday", -1)
+ rffi.setintfield(buf, "c_tm_year", rffi.getintfield(buf, "c_tm_year") - 1900)
tt = c_mktime(buf)
# A return value of -1 does not necessarily mean an error, but tm_wday
# cannot remain set to -1 if mktime succeeds.
@@ -801,6 +805,8 @@
rffi.setintfield(buf_value, 'c_tm_isdst', -1)
elif rffi.getintfield(buf_value, 'c_tm_isdst') > 1:
rffi.setintfield(buf_value, 'c_tm_isdst', 1)
+ rffi.setintfield(buf_value, "c_tm_year",
+ rffi.getintfield(buf_value, "c_tm_year") - 1900)
if _WIN:
# check that the format string contains only valid directives
@@ -970,8 +976,11 @@
lltype.scoped_alloc(rwin32.FILETIME) as exit_time, \
lltype.scoped_alloc(rwin32.FILETIME) as kernel_time, \
lltype.scoped_alloc(rwin32.FILETIME) as user_time:
- GetProcessTimes(current_process, creation_time, exit_time,
- kernel_time, user_time)
+ worked = GetProcessTimes(current_process, creation_time, exit_time,
+ kernel_time, user_time)
+ if not worked:
+ raise wrap_windowserror(space,
+ rwin32.lastSavedWindowsError("GetProcessTimes"))
kernel_time2 = (kernel_time.c_dwLowDateTime |
r_ulonglong(kernel_time.c_dwHighDateTime) << 32)
user_time2 = (user_time.c_dwLowDateTime |
From pypy.commits at gmail.com Wed Aug 3 00:29:21 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 02 Aug 2016 21:29:21 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Fix test_dictproxy.py to actually
match the expected behaviour
Message-ID: <57a17321.e129c20a.19a07.e965@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r85998:25dc55060e7c
Date: 2016-08-03 04:59 +0100
http://bitbucket.org/pypy/pypy/changeset/25dc55060e7c/
Log: Fix test_dictproxy.py to actually match the expected behaviour
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -9,42 +9,20 @@
assert 'a' in NotEmpty.__dict__
assert 'a' in NotEmpty.__dict__.keys()
assert 'b' not in NotEmpty.__dict__
- NotEmpty.__dict__['b'] = 4
- assert NotEmpty.b == 4
- del NotEmpty.__dict__['b']
assert NotEmpty.__dict__.get("b") is None
+ raises(TypeError, "NotEmpty.__dict__['b'] = 4")
raises(TypeError, 'NotEmpty.__dict__[15] = "y"')
- raises(KeyError, 'del NotEmpty.__dict__[15]')
+ raises(TypeError, 'del NotEmpty.__dict__[15]')
- assert NotEmpty.__dict__.setdefault("string", 1) == 1
- assert NotEmpty.__dict__.setdefault("string", 2) == 1
- assert NotEmpty.string == 1
- raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)')
-
- def test_dictproxy_popitem(self):
- class A(object):
- a = 42
- seen = 0
- try:
- while True:
- key, value = A.__dict__.popitem()
- if key == 'a':
- assert value == 42
- seen += 1
- except KeyError:
- pass
- assert seen == 1
+ raises(AttributeError, 'NotEmpty.__dict__.setdefault')
def test_dictproxy_getitem(self):
class NotEmpty(object):
a = 1
assert 'a' in NotEmpty.__dict__
- class substr(str): pass
+ class substr(str):
+ pass
assert substr('a') in NotEmpty.__dict__
- # the following are only for py2
- ## assert u'a' in NotEmpty.__dict__
- ## assert NotEmpty.__dict__[u'a'] == 1
- ## assert u'\xe9' not in NotEmpty.__dict__
def test_dictproxyeq(self):
class a(object):
@@ -63,9 +41,9 @@
class a(object):
pass
s1 = repr(a.__dict__)
+ assert s1.startswith('mappingproxy({') and s1.endswith('})')
s2 = str(a.__dict__)
- assert s1 == s2
- assert s1.startswith('mappingproxy({') and s1.endswith('})')
+ assert s1 == 'mappingproxy(%s)' % s2
def test_immutable_dict_on_builtin_type(self):
raises(TypeError, "int.__dict__['a'] = 1")
@@ -100,4 +78,3 @@
class AppTestUserObjectMethodCache(AppTestUserObject):
spaceconfig = {"objspace.std.withmethodcachecounter": True}
-
From pypy.commits at gmail.com Wed Aug 3 00:29:23 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 02 Aug 2016 21:29:23 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Move cpyext implementation of
mappingproxy to objspace
Message-ID: <57a17323.109a1c0a.ee027.0f1a@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r85999:fd9153375a2a
Date: 2016-08-03 05:17 +0100
http://bitbucket.org/pypy/pypy/changeset/fd9153375a2a/
Log: Move cpyext implementation of mappingproxy to objspace
diff --git a/pypy/module/cpyext/dictproxyobject.py b/pypy/module/cpyext/dictproxyobject.py
--- a/pypy/module/cpyext/dictproxyobject.py
+++ b/pypy/module/cpyext/dictproxyobject.py
@@ -1,67 +1,7 @@
-# Read-only proxy for mappings. PyPy does not have a separate type for
-# type.__dict__, so PyDictProxy_New has to use a custom read-only mapping.
-
-from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
-from pypy.interpreter.typedef import TypeDef, interp2app
+from pypy.objspace.std.dictproxyobject import W_DictProxyObject
from pypy.module.cpyext.api import cpython_api, build_type_checkers
from pypy.module.cpyext.pyobject import PyObject
-class W_DictProxyObject(W_Root):
- "Read-only proxy for mappings."
-
- def __init__(self, w_mapping):
- self.w_mapping = w_mapping
-
- def descr_len(self, space):
- return space.len(self.w_mapping)
-
- def descr_getitem(self, space, w_key):
- return space.getitem(self.w_mapping, w_key)
-
- def descr_contains(self, space, w_key):
- return space.contains(self.w_mapping, w_key)
-
- def descr_iter(self, space):
- return space.iter(self.w_mapping)
-
- def descr_str(self, space):
- return space.str(self.w_mapping)
-
- def descr_repr(self, space):
- return space.repr(self.w_mapping)
-
- @unwrap_spec(w_default=WrappedDefault(None))
- def get_w(self, space, w_key, w_default):
- return space.call_method(self.w_mapping, "get", w_key, w_default)
-
- def keys_w(self, space):
- return space.call_method(self.w_mapping, "keys")
-
- def values_w(self, space):
- return space.call_method(self.w_mapping, "values")
-
- def items_w(self, space):
- return space.call_method(self.w_mapping, "items")
-
- def copy_w(self, space):
- return space.call_method(self.w_mapping, "copy")
-
-W_DictProxyObject.typedef = TypeDef(
- 'mappingproxy',
- __len__=interp2app(W_DictProxyObject.descr_len),
- __getitem__=interp2app(W_DictProxyObject.descr_getitem),
- __contains__=interp2app(W_DictProxyObject.descr_contains),
- __iter__=interp2app(W_DictProxyObject.descr_iter),
- __str__=interp2app(W_DictProxyObject.descr_str),
- __repr__=interp2app(W_DictProxyObject.descr_repr),
- get=interp2app(W_DictProxyObject.get_w),
- keys=interp2app(W_DictProxyObject.keys_w),
- values=interp2app(W_DictProxyObject.values_w),
- items=interp2app(W_DictProxyObject.items_w),
- copy=interp2app(W_DictProxyObject.copy_w)
-)
-
PyDictProxy_Check, PyDictProxy_CheckExact = build_type_checkers(
"DictProxy", W_DictProxyObject)
diff --git a/pypy/module/cpyext/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
copy from pypy/module/cpyext/dictproxyobject.py
copy to pypy/objspace/std/dictproxyobject.py
--- a/pypy/module/cpyext/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -4,8 +4,6 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef, interp2app
-from pypy.module.cpyext.api import cpython_api, build_type_checkers
-from pypy.module.cpyext.pyobject import PyObject
class W_DictProxyObject(W_Root):
"Read-only proxy for mappings."
@@ -61,10 +59,3 @@
items=interp2app(W_DictProxyObject.items_w),
copy=interp2app(W_DictProxyObject.copy_w)
)
-
-PyDictProxy_Check, PyDictProxy_CheckExact = build_type_checkers(
- "DictProxy", W_DictProxyObject)
-
- at cpython_api([PyObject], PyObject)
-def PyDictProxy_New(space, w_dict):
- return space.wrap(W_DictProxyObject(w_dict))
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -486,11 +486,13 @@
def getdict(self, space): # returning a dict-proxy!
from pypy.objspace.std.classdict import ClassDictStrategy
from pypy.objspace.std.dictmultiobject import W_DictObject
+ from pypy.objspace.std.dictproxyobject import W_DictProxyObject
if self.lazyloaders:
self._cleanup_() # force un-lazification
strategy = space.fromcache(ClassDictStrategy)
storage = strategy.erase(self)
- return W_DictObject(space, strategy, storage)
+ w_dict = W_DictObject(space, strategy, storage)
+ return W_DictProxyObject(w_dict)
def is_heaptype(self):
return self.flag_heaptype
From pypy.commits at gmail.com Wed Aug 3 04:55:15 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Wed, 03 Aug 2016 01:55:15 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: updates to the blog post draft
(addressed cfbolz's comments)
Message-ID: <57a1b173.43681c0a.2325f.5b4b@mx.google.com>
Author: Richard Plangger
Branch: extradoc
Changeset: r5661:37e031d5a693
Date: 2016-08-02 18:56 +0200
http://bitbucket.org/pypy/extradoc/changeset/37e031d5a693/
Log: updates to the blog post draft (addressed cfbolz's comments)
diff --git a/blog/draft/new-jit-log.rst b/blog/draft/new-jit-log.rst
--- a/blog/draft/new-jit-log.rst
+++ b/blog/draft/new-jit-log.rst
@@ -1,18 +1,26 @@
-PyPy's Toolbox got a new Hammer 🔨
+JitViewer moves to vmprof.com
=======
-.. : XXX the title is very generic
+We are happy to announce that VMProf got a major update. The most significant change is the movement of JitViewer (JV)
+to VMProf.
+JV allows you to inspect PyPy's internal compiler representation including the generated machine code of your program.
+A useful tool to understand PyPy, learn many details of our compiler and find potential issues related to our JIT.
+Both VMProf and JV share some common goals. That is the reason why they are now both packaged together.
+www.vmprof.com also got updated with various bugfixes and changes including an all new interface to JV.
-.. : XXX I don't actually like the first paragraph, I think it should be more
- to the point. eg that things happened at the Leysin sprint doesn't matter much.
- I would also add links to all the existing tools
+An advertisement: We constantly improve tooling and libraries around the Python/PyPy ecosystem.
+Here are three examples you might also want to use in your Python projects:
-Tools, tools, tools! It seems that PyPy cannot get enough of them!
-In the last winter sprint (Leysin) covered the current tool for observing internals of the JIT compiler (JitViewer). VMProf at that time already proved that it is a good tool for CPU profiles. We are happy to release a new version of VMProf incorporating a rewritten version of JitViewer.
+* VMProf - A statistical CPU profiler
+* RevDB - A reverse debugger for Python
+* CFFI - Foreign Function Interface that avoids CPyExt
-The old logging format was a hard to maintain plain text logging facility. Frequent changes often broke internal tools, most notably the JITViewer. Another problem was that the logging output of a long running program took a lot of disk space.
+A brand new JitViewer
+---------------------
-Our new binary format encodes data densly, makes use of some compression (gzip) and tries to remove repetition where possible. On top of that protocol supports versioning and can be extended easily. And *drumroll* you do not need to install JitViewer yourself anymore! The whole system moved to vmprof.com and you can use it any time.
+The old logging format was a hard-to-maintain plain text logging facility. Frequent changes often broke internal tools, most notably JV. Additionally, the logging output of a long running program took a lot of disk space.
+
+Our new binary format encodes data densely, makes use of some compression (gzip) and tries to remove repetition where possible. On top of that, the protocol supports versioning and can be extended easily. And *drumroll* you do not need to install JV yourself anymore! The whole system moved to vmprof.com and you can use it any time.
Sounds great. But what can you do with it? Here are two examples useful for a PyPy user:
@@ -21,19 +29,19 @@
For some hard to find bugs it is often necessary to look at the compiled code. The old procedure often required to upload a plain text file which was hard to parse and to look through.
-The new way to share a crash report is to install the ``vmprof`` module from PyPi and execute either of the two commands:
+A new way to share a crash report is to install the ``vmprof`` module from PyPi and execute either of the two commands:
```
# this program does not crash, but has some weird behaviour
$ pypy -m jitlog --web
...
-PyPy Jitlog: http://vmprof.com/#/
+PyPy Jitlog: http://vmprof.com/#//traces
# this program segfaults
$ pypy -m jitlog -o /tmp/log
...
$ pypy -m jitlog --upload /tmp/log
-PyPy Jitlog: http://vmprof.com/#/
+PyPy Jitlog: http://vmprof.com/#//traces
```
Providing the link in the bug report enables PyPy developers to browse and identify potential issues.
@@ -41,7 +49,7 @@
Speed issues
------------
-VMProf is a great tool to find out hot spots that consume a lot of time in your program. As soon as you have idenified code that runs slow, you can switch to jitlog and maybe pin point certain aspects that do not behave as expected. You will find not only the overview, but are also able to browse the generated code. If you cannot make sense of that all you can just share the link with us and we can have a look at the compiled code.
+VMProf is a great tool to find out hot spots that consume a lot of time in your program. As soon as you have identified code that runs slow, you can switch to jitlog and maybe pin point certain aspects that do not behave as expected. You will find not only the overview, but are also able to browse the generated code. If you cannot make sense of it all, you can just share the link with us and we can have a look at it too.
Future direction
----------------
@@ -52,7 +60,7 @@
* Combination of CPU profiles and the JITLOG (Sadly did not make it into the current release)
-* Extend vmprof.com to be able to query vmprof/jitlog. Some times it is interesting to search for specific patterns the compiler produced. An example for vmprof: 'methods.callsites() > 5' and for the jitlog would be 'traces.contains('call_assembler').hasbridge('*my_func_name*')'
+* Extend vmprof.com to be able to query vmprof/jitlog. An example query for vmprof: 'methods.callsites() > 5' and for the jitlog would be 'traces.contains('call_assembler').hasbridge('*my_func_name*')'.
* Extend the jitlog to capture the information of the optimization stage
From pypy.commits at gmail.com Wed Aug 3 07:34:42 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Wed, 03 Aug 2016 04:34:42 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: merge default
Message-ID: <57a1d6d2.56421c0a.75916.9d16@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86000:8ec827c25980
Date: 2016-08-02 09:29 +0200
http://bitbucket.org/pypy/pypy/changeset/8ec827c25980/
Log: merge default
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -101,3 +101,11 @@
.. branch: jitlog-32bit
Resolve issues to use the new logging facility on a 32bit system
+
+.. branch: ep2016sprint
+
+Trying harder to make hash(-1) return -2, like it does on CPython
+
+.. branch: jitlog-exact-source-lines
+
+Log exact line positions in debug merge points.
diff --git a/pypy/module/_jitlog/test/test__jitlog.py b/pypy/module/_jitlog/test/test__jitlog.py
--- a/pypy/module/_jitlog/test/test__jitlog.py
+++ b/pypy/module/_jitlog/test/test__jitlog.py
@@ -1,8 +1,9 @@
-
import sys
+import platform
from rpython.tool.udir import udir
from pypy.tool.pytest.objspace import gettestobjspace
from rpython.rlib.rjitlog import rjitlog as jl
+from rpython.jit.metainterp.resoperation import opname
class AppTestJitLog(object):
spaceconfig = {'usemodules': ['_jitlog', 'struct']}
@@ -12,6 +13,11 @@
cls.w_mark_header = cls.space.wrap(jl.MARK_JITLOG_HEADER)
cls.w_version = cls.space.wrap(jl.JITLOG_VERSION_16BIT_LE)
cls.w_is_32bit = cls.space.wrap(sys.maxint == 2**31-1)
+ cls.w_machine = cls.space.wrap(platform.machine())
+ cls.w_resops = cls.space.newdict()
+ space = cls.space
+ for key, value in opname.items():
+ space.setitem(cls.w_resops, space.wrap(key), space.wrap(value))
def test_enable(self):
import _jitlog, struct
@@ -25,8 +31,22 @@
assert fd.read(1) == self.mark_header
assert fd.read(2) == self.version
assert bool(ord(fd.read(1))) == self.is_32bit
+ strcount, = struct.unpack('ob_type->tp_as_number->nb_oct)
+ ret = obj->ob_type->tp_as_number->nb_oct(obj);
+ else
+ ret = PyLong_FromLong(-1);
+ Py_DECREF(obj);
+ return ret;
""")])
assert module.has_sub() == 0
assert module.has_pow() == 0
assert module.has_hex() == '0x2aL'
+ assert module.has_oct() == '052L'
diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py
--- a/pypy/module/cpyext/test/test_number.py
+++ b/pypy/module/cpyext/test/test_number.py
@@ -15,7 +15,7 @@
assert api.PyNumber_Check(space.wraplong(-12L))
assert api.PyNumber_Check(space.wrap(12.1))
assert not api.PyNumber_Check(space.wrap('12'))
- assert not api.PyNumber_Check(space.wrap(1+3j))
+ assert api.PyNumber_Check(space.wrap(1+3j))
def test_number_long(self, space, api):
w_l = api.PyNumber_Long(space.wrap(123))
@@ -151,7 +151,6 @@
'''
PyObject *obj = PyTuple_GET_ITEM(args, 0);
int val = PyNumber_Check(obj);
- Py_DECREF(obj);
return PyInt_FromLong(val);
''')])
val = mod.test_PyNumber_Check(10)
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -800,7 +800,7 @@
IntLike_Type.tp_as_number = &intlike_as_number;
intlike_as_number.nb_nonzero = intlike_nb_nonzero;
intlike_as_number.nb_int = intlike_nb_int;
- if (PyType_Ready(&IntLike_Type) < 0) return NULL;
+ PyType_Ready(&IntLike_Type);
""")
assert not bool(module.newInt(0))
assert bool(module.newInt(1))
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -46,6 +46,7 @@
jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE)
def get_location(next_instr, is_being_profiled, bytecode):
from pypy.tool.stdlib_opcode import opcode_method_names
+ from rpython.tool.error import offset2lineno
bcindex = ord(bytecode.co_code[next_instr])
opname = ""
if 0 <= bcindex < len(opcode_method_names):
@@ -53,7 +54,8 @@
name = bytecode.co_name
if not name:
name = ""
- return (bytecode.co_filename, bytecode.co_firstlineno,
+ line = offset2lineno(bytecode, intmask(next_instr))
+ return (bytecode.co_filename, line,
name, intmask(next_instr), opname)
def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode):
diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py
--- a/pypy/module/thread/os_lock.py
+++ b/pypy/module/thread/os_lock.py
@@ -26,8 +26,7 @@
elif timeout == -1.0:
microseconds = -1
else:
- # 0.0 => 0.0, but otherwise tends to round up
- timeout = timeout * 1e6 + 0.999
+ timeout *= 1e6
try:
microseconds = ovfcheck_float_to_longlong(timeout)
except OverflowError:
diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py
--- a/pypy/module/thread/test/test_lock.py
+++ b/pypy/module/thread/test/test_lock.py
@@ -81,7 +81,7 @@
else:
got_ovf = False
lock.release()
- assert (i, got_ovf) == (i, int(timeout * 1e6 + 0.999) > maxint)
+ assert (i, got_ovf) == (i, int(timeout * 1e6) > maxint)
@py.test.mark.xfail(machine()=='s390x', reason='may fail under heavy load')
def test_ping_pong(self):
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -424,29 +424,20 @@
raise oefmt(space.w_TypeError,
"'%T' objects are unhashable", w_obj)
w_result = space.get_and_call_function(w_hash, w_obj)
- w_resulttype = space.type(w_result)
# issue 2346 : returns now -2 for hashing -1 like cpython
- if space.is_w(w_resulttype, space.w_int):
- if space.int_w(w_result) == -1:
- return space.wrap(-2)
- return w_result
- elif space.isinstance_w(w_result, space.w_int):
- # be careful about subclasses of 'int'...
- int_result = space.int_w(w_result)
- if int_result == -1:
- int_result == -2
- return space.wrap(int_result)
+ if space.isinstance_w(w_result, space.w_int):
+ h = space.int_w(w_result)
elif space.isinstance_w(w_result, space.w_long):
- # be careful about subclasses of 'long'...
bigint = space.bigint_w(w_result)
h = bigint.hash()
- if h == -1:
- h = -2
- return space.wrap(h)
else:
raise oefmt(space.w_TypeError,
"__hash__() should return an int or long")
+ # turn -1 into -2 without using a condition, which would
+ # create a potential bridge in the JIT
+ h -= (h == -1)
+ return space.wrap(h)
def cmp(space, w_v, w_w):
diff --git a/pypy/objspace/std/test/test_stdobjspace.py b/pypy/objspace/std/test/test_stdobjspace.py
--- a/pypy/objspace/std/test/test_stdobjspace.py
+++ b/pypy/objspace/std/test/test_stdobjspace.py
@@ -66,17 +66,18 @@
def test_wrap_various_unsigned_types(self):
import sys
+ from rpython.rlib.rarithmetic import r_uint
from rpython.rtyper.lltypesystem import lltype, rffi
space = self.space
value = sys.maxint * 2
- x = rffi.cast(lltype.Unsigned, value)
+ x = r_uint(value)
assert space.eq_w(space.wrap(value), space.wrap(x))
- x = rffi.cast(rffi.UINTPTR_T, value)
+ x = rffi.cast(rffi.UINTPTR_T, r_uint(value))
assert x > 0
assert space.eq_w(space.wrap(value), space.wrap(x))
value = 60000
- x = rffi.cast(rffi.USHORT, value)
+ x = rffi.cast(rffi.USHORT, r_uint(value))
assert space.eq_w(space.wrap(value), space.wrap(x))
value = 200
- x = rffi.cast(rffi.UCHAR, value)
+ x = rffi.cast(rffi.UCHAR, r_uint(value))
assert space.eq_w(space.wrap(value), space.wrap(x))
diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py
--- a/pypy/tool/pytest/objspace.py
+++ b/pypy/tool/pytest/objspace.py
@@ -128,3 +128,5 @@
def is_w(self, obj1, obj2):
return obj1 is obj2
+ def setitem(self, obj, key, value):
+ obj[key] = value
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py
--- a/rpython/annotator/binaryop.py
+++ b/rpython/annotator/binaryop.py
@@ -401,6 +401,9 @@
class __extend__(pairtype(SomeString, SomeTuple),
pairtype(SomeUnicodeString, SomeTuple)):
def mod((s_string, s_tuple)):
+ if not s_string.is_constant():
+ raise AnnotatorError("string formatting requires a constant "
+ "string/unicode on the left of '%'")
is_string = isinstance(s_string, SomeString)
is_unicode = isinstance(s_string, SomeUnicodeString)
assert is_string or is_unicode
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4623,6 +4623,14 @@
a = self.RPythonAnnotator()
a.build_types(main, [int])
+ def test_string_mod_nonconstant(self):
+ def f(x):
+ return x % 5
+ a = self.RPythonAnnotator()
+ e = py.test.raises(AnnotatorError, a.build_types, f, [str])
+ assert ('string formatting requires a constant string/unicode'
+ in str(e.value))
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/jit/backend/x86/test/test_regloc.py b/rpython/jit/backend/x86/test/test_regloc.py
--- a/rpython/jit/backend/x86/test/test_regloc.py
+++ b/rpython/jit/backend/x86/test/test_regloc.py
@@ -147,7 +147,7 @@
py.test.skip()
def test_reuse_scratch_register(self):
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.begin_reuse_scratch_register()
cb.MOV(ecx, heap(base_addr))
@@ -167,7 +167,7 @@
# ------------------------------------------------------------
def test_64bit_address_1(self):
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.CMP(ecx, AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr))
# this case is a CMP_rj
@@ -181,7 +181,7 @@
assert cb.getvalue() == expected_instructions
def test_64bit_address_2(self):
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(ecx, AddressLoc(ImmedLoc(0), edx, 3, base_addr))
# this case is a CMP_ra
@@ -195,7 +195,7 @@
assert cb.getvalue() == expected_instructions
def test_64bit_address_3(self):
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(ecx, AddressLoc(edx, ImmedLoc(0), 0, base_addr))
# this case is a CMP_rm
@@ -211,7 +211,7 @@
assert cb.getvalue() == expected_instructions
def test_64bit_address_4(self):
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.begin_reuse_scratch_register()
assert cb._reuse_scratch_register is True
@@ -234,7 +234,7 @@
# ------------------------------------------------------------
def test_MOV_64bit_constant_into_r11(self):
- base_constant = 0xFEDCBA9876543210
+ base_constant = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(r11, imm(base_constant))
@@ -245,7 +245,7 @@
assert cb.getvalue() == expected_instructions
def test_MOV_64bit_constant_into_rax(self):
- base_constant = 0xFEDCBA9876543210
+ base_constant = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(eax, imm(base_constant))
@@ -256,7 +256,7 @@
assert cb.getvalue() == expected_instructions
def test_MOV_64bit_address_into_r11(self):
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(r11, heap(base_addr))
@@ -270,7 +270,7 @@
def test_MOV_immed32_into_64bit_address_1(self):
immed = -0x01234567
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr),
ImmedLoc(immed))
@@ -286,7 +286,7 @@
def test_MOV_immed32_into_64bit_address_2(self):
immed = -0x01234567
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr),
ImmedLoc(immed))
@@ -302,7 +302,7 @@
def test_MOV_immed32_into_64bit_address_3(self):
immed = -0x01234567
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(AddressLoc(edx, ImmedLoc(0), 0, base_addr),
ImmedLoc(immed))
@@ -320,7 +320,7 @@
def test_MOV_immed32_into_64bit_address_4(self):
immed = -0x01234567
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(AddressLoc(edx, esi, 2, base_addr), ImmedLoc(immed))
# this case is a MOV_ai
@@ -339,7 +339,7 @@
def test_MOV_immed64_into_64bit_address_1(self):
immed = 0x0123456789ABCDEF
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr),
ImmedLoc(immed))
@@ -361,7 +361,7 @@
def test_MOV_immed64_into_64bit_address_2(self):
immed = 0x0123456789ABCDEF
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr),
ImmedLoc(immed))
@@ -383,7 +383,7 @@
def test_MOV_immed64_into_64bit_address_3(self):
immed = 0x0123456789ABCDEF
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(AddressLoc(eax, ImmedLoc(0), 0, base_addr),
ImmedLoc(immed))
@@ -407,7 +407,7 @@
def test_MOV_immed64_into_64bit_address_4(self):
immed = 0x0123456789ABCDEF
- base_addr = 0xFEDCBA9876543210
+ base_addr = intmask(0xFEDCBA9876543210)
cb = LocationCodeBuilder64()
cb.MOV(AddressLoc(edx, eax, 2, base_addr), ImmedLoc(immed))
# this case is a MOV_ai
diff --git a/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py b/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py
--- a/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py
+++ b/rpython/jit/backend/x86/test/test_ztranslation_call_assembler.py
@@ -4,6 +4,16 @@
from rpython.jit.backend.x86.arch import WORD
import sys
+
+# On Windows, this test crashes obscurely, but only if compiled with
+# Boehm, not if run with no GC at all. So for now we'll assume it is
+# really a Boehm bug, or maybe a Boehm-on-Windows-specific issue, and
+# skip.
+if sys.platform == 'win32':
+ import py
+ py.test.skip("crashes on Windows (Boehm issue?)")
+
+
class TestTranslationCallAssemblerX86(TranslationTestCallAssembler):
def _check_cbuilder(self, cbuilder):
#We assume here that we have sse2. If not, the CPUClass
diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py
--- a/rpython/jit/backend/zarch/test/test_assembler.py
+++ b/rpython/jit/backend/zarch/test/test_assembler.py
@@ -21,6 +21,7 @@
from rpython.rlib.debug import ll_assert
from rpython.rlib.longlong2float import (float2longlong,
DOUBLE_ARRAY_PTR, singlefloat2uint_emulator)
+from rpython.rlib.rarithmetic import r_uint, intmask
import ctypes
CPU = getcpuclass()
@@ -168,7 +169,7 @@
def test_load_byte_zero_extend(self):
adr = self.a.datablockwrapper.malloc_aligned(16, 16)
data = rffi.cast(rffi.CArrayPtr(rffi.ULONG), adr)
- data[0] = rffi.cast(rffi.ULONG,0xffffFFFFffffFF02)
+ data[0] = rffi.cast(rffi.ULONG, intmask(0xffffFFFFffffFF02))
self.a.mc.load_imm(r.r3, adr+7)
self.a.mc.LLGC(r.r2, loc.addr(0,r.r3))
self.a.mc.BCR(con.ANY, r.r14)
@@ -177,7 +178,7 @@
def test_load_byte_and_imm(self):
adr = self.a.datablockwrapper.malloc_aligned(16, 16)
data = rffi.cast(rffi.CArrayPtr(rffi.ULONG), adr)
- data[0] = rffi.cast(rffi.ULONG,0xffffFFFFffff0001)
+ data[0] = rffi.cast(rffi.ULONG, intmask(0xffffFFFFffff0001))
self.a.mc.load_imm(r.r3, adr)
self.a.mc.LG(r.r2, loc.addr(0,r.r3))
self.a.mc.LLGC(r.r2, loc.addr(7,r.r3))
diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py
--- a/rpython/jit/metainterp/test/test_fficall.py
+++ b/rpython/jit/metainterp/test/test_fficall.py
@@ -11,7 +11,7 @@
from rpython.rlib.jit_libffi import (types, CIF_DESCRIPTION, FFI_TYPE_PP,
jit_ffi_call)
from rpython.rlib.unroll import unrolling_iterable
-from rpython.rlib.rarithmetic import intmask, r_longlong, r_singlefloat
+from rpython.rlib.rarithmetic import intmask, r_longlong, r_singlefloat, r_uint
from rpython.rlib.longlong2float import float2longlong
def get_description(atypes, rtype):
@@ -230,8 +230,8 @@
def test_handle_unsigned(self):
self._run([types.ulong], types.ulong,
- [rffi.cast(rffi.ULONG, sys.maxint + 91348)],
- rffi.cast(rffi.ULONG, sys.maxint + 4242))
+ [rffi.cast(rffi.ULONG, r_uint(sys.maxint + 91348))],
+ rffi.cast(rffi.ULONG, r_uint(sys.maxint + 4242)))
def test_handle_unsignedchar(self):
self._run([types.uint8], types.uint8,
diff --git a/rpython/jit/metainterp/test/test_memmgr.py b/rpython/jit/metainterp/test/test_memmgr.py
--- a/rpython/jit/metainterp/test/test_memmgr.py
+++ b/rpython/jit/metainterp/test/test_memmgr.py
@@ -248,8 +248,8 @@
tokens = [t() for t in get_stats().jitcell_token_wrefs]
# Some loops have been freed
assert None in tokens
- # Loop with number 0, h(), has not been freed
- assert 0 in [t.number for t in tokens if t]
+ # Loop with number 1, h(), has not been freed
+ assert 1 in [t.number for t in tokens if t]
# ____________________________________________________________
diff --git a/rpython/jit/tl/tla/targettla.py b/rpython/jit/tl/tla/targettla.py
--- a/rpython/jit/tl/tla/targettla.py
+++ b/rpython/jit/tl/tla/targettla.py
@@ -4,9 +4,16 @@
def entry_point(args):
- """Main entry point of the stand-alone executable:
- takes a list of strings and returns the exit code.
- """
+ for i in range(len(argv)):
+ if argv[i] == "--jit":
+ if len(argv) == i + 1:
+ print "missing argument after --jit"
+ return 2
+ jitarg = argv[i + 1]
+ del argv[i:i+2]
+ jit.set_user_param(jitdriver, jitarg)
+ break
+
if len(args) < 3:
print "Usage: %s filename x" % (args[0],)
return 2
@@ -26,7 +33,7 @@
return bytecode
def target(driver, args):
- return entry_point, None
+ return entry_point
# ____________________________________________________________
diff --git a/rpython/jit/tl/tla/tla.py b/rpython/jit/tl/tla/tla.py
--- a/rpython/jit/tl/tla/tla.py
+++ b/rpython/jit/tl/tla/tla.py
@@ -60,19 +60,34 @@
# ____________________________________________________________
-CONST_INT = 1
-POP = 2
-ADD = 3
-RETURN = 4
-JUMP_IF = 5
-DUP = 6
-SUB = 7
-NEWSTR = 8
+OPNAMES = []
+HASARG = []
+
+def define_op(name, has_arg=False):
+ globals()[name] = len(OPNAMES)
+ OPNAMES.append(name)
+ HASARG.append(has_arg)
+
+define_op("CONST_INT", True)
+define_op("POP")
+define_op("ADD")
+define_op("RETURN")
+define_op("JUMP_IF", True)
+define_op("DUP")
+define_op("SUB")
+define_op("NEWSTR", True)
+
# ____________________________________________________________
def get_printable_location(pc, bytecode):
- return str(pc)
+ op = ord(bytecode[pc])
+ name = OPNAMES[op]
+ if HASARG[op]:
+ arg = str(ord(bytecode[pc + 1]))
+ else:
+ arg = ''
+ return "%s: %s %s" % (pc, name, arg)
jitdriver = JitDriver(greens=['pc', 'bytecode'],
reds=['self'],
diff --git a/rpython/rlib/rjitlog/rjitlog.py b/rpython/rlib/rjitlog/rjitlog.py
--- a/rpython/rlib/rjitlog/rjitlog.py
+++ b/rpython/rlib/rjitlog/rjitlog.py
@@ -3,6 +3,7 @@
import weakref
import struct
import os
+import platform
from rpython.rlib import jit
from rpython.tool.udir import udir
from rpython.tool.version import rpythonroot
@@ -282,14 +283,16 @@
IS_32_BIT = sys.maxint == 2**31-1
+MACHINE_NAME = platform.machine()
+
def assemble_header():
version = JITLOG_VERSION_16BIT_LE
count = len(resoperations.opname)
is_32bit = chr(0x1)
if not IS_32_BIT:
is_32bit = chr(0x0)
- content = [version, is_32bit, MARK_RESOP_META,
- encode_le_16bit(count)]
+ content = [version, is_32bit, encode_str(MACHINE_NAME),
+ MARK_RESOP_META, encode_le_16bit(count)]
for opnum, opname in resoperations.opname.items():
content.append(encode_le_16bit(opnum))
content.append(encode_str(opname.lower()))
diff --git a/rpython/rlib/test/test_rarithmetic.py b/rpython/rlib/test/test_rarithmetic.py
--- a/rpython/rlib/test/test_rarithmetic.py
+++ b/rpython/rlib/test/test_rarithmetic.py
@@ -404,6 +404,8 @@
def test_int_c_div_mod(x, y):
assert int_c_div(~x, y) == -(abs(~x) // y)
assert int_c_div( x,-y) == -(x // y)
+ if (x, y) == (sys.maxint, 1):
+ py.test.skip("would overflow")
assert int_c_div(~x,-y) == +(abs(~x) // y)
for x1 in [x, ~x]:
for y1 in [y, -y]:
diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py
--- a/rpython/rtyper/lltypesystem/lltype.py
+++ b/rpython/rtyper/lltypesystem/lltype.py
@@ -812,8 +812,10 @@
if tp is long:
if -maxint-1 <= val <= maxint:
return Signed
+ elif longlongmask(val) == val:
+ return SignedLongLong
else:
- return SignedLongLong
+ raise OverflowError("integer %r is out of bounds" % (val,))
if tp is bool:
return Bool
if issubclass(tp, base_int):
diff --git a/rpython/rtyper/rrange.py b/rpython/rtyper/rrange.py
--- a/rpython/rtyper/rrange.py
+++ b/rpython/rtyper/rrange.py
@@ -199,8 +199,11 @@
self.r_baseiter = r_baseiter
self.lowleveltype = r_baseiter.lowleveltype
# only supports for now enumerate() on sequence types whose iterators
- # have a method ll_getnextindex. It's easy to add one for most
- # iterator types, but I didn't do it so far.
+ # have a method ll_getnextindex. It could be added for most
+ # iterator types, but it's a bit messy for no clear benefit.
+ if not hasattr(r_baseiter, 'll_getnextindex'):
+ raise TyperError("not implemented for now: enumerate(x) where x "
+ "is not a regular list (got %r)" % (r_baseiter,))
self.ll_getnextindex = r_baseiter.ll_getnextindex
def rtype_next(self, hop):
diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py
--- a/rpython/rtyper/rstr.py
+++ b/rpython/rtyper/rstr.py
@@ -591,7 +591,9 @@
class __extend__(pairtype(IntegerRepr, AbstractStringRepr)):
def rtype_mul((r_int, r_str), hop):
- return pair(r_str, r_int).rtype_mul(hop)
+ str_repr = r_str.repr
+ v_int, v_str = hop.inputargs(Signed, str_repr)
+ return hop.gendirectcall(r_str.ll.ll_str_mul, v_str, v_int)
rtype_inplace_mul = rtype_mul
diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py
--- a/rpython/rtyper/test/test_rstr.py
+++ b/rpython/rtyper/test/test_rstr.py
@@ -220,11 +220,12 @@
const = self.const
def fn(i, mul):
s = ["", "a", "aba"][i]
- return s * mul
+ return s * mul + mul * s
for i in xrange(3):
for m in [0, 1, 4]:
+ res1 = fn(i, m)
res = self.interpret(fn, [i, m])
- assert self.ll_to_string(res) == fn(i, m)
+ assert self.ll_to_string(res) == res1
def test_is_none(self):
const = self.const
diff --git a/rpython/tool/error.py b/rpython/tool/error.py
--- a/rpython/tool/error.py
+++ b/rpython/tool/error.py
@@ -158,6 +158,8 @@
@jit.elidable
def offset2lineno(c, stopat):
# even positions in lnotab denote byte increments, odd positions line increments.
+ # see dis.findlinestarts in the python std. library for more details
tab = c.co_lnotab
line = c.co_firstlineno
addr = 0
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py
--- a/rpython/translator/c/genc.py
+++ b/rpython/translator/c/genc.py
@@ -422,14 +422,11 @@
mk.definition('PROFOPT', profopt)
rules = [
- ('clean', '', 'rm -f $(OBJECTS) $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES) *.gc?? ../module_cache/*.gc??'),
- ('clean_noprof', '', 'rm -f $(OBJECTS) $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES)'),
('debug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT" debug_target'),
('debug_exc', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DDO_LOG_EXC" debug_target'),
('debug_mem', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_TRIVIAL_MALLOC" debug_target'),
('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(DEFAULT_TARGET)'),
('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'),
- ('lldebug0','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -O0 -DMAX_STACK_SIZE=8192000 -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'),
('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(DEFAULT_TARGET)'),
]
if self.has_profopt():
@@ -443,6 +440,17 @@
for rule in rules:
mk.rule(*rule)
+ if self.translator.platform.name == 'msvc':
+ mk.rule('lldebug0','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -Od -DMAX_STACK_SIZE=8192000 -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'),
+ wildcards = '..\*.obj ..\*.pdb ..\*.lib ..\*.dll ..\*.manifest ..\*.exp *.pch'
+ cmd = r'del /s %s $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES)' % wildcards
+ mk.rule('clean', '', cmd + ' *.gc?? ..\module_cache\*.gc??')
+ mk.rule('clean_noprof', '', cmd)
+ else:
+ mk.rule('lldebug0','', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -O0 -DMAX_STACK_SIZE=8192000 -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'),
+ mk.rule('clean', '', 'rm -f $(OBJECTS) $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES) *.gc?? ../module_cache/*.gc??')
+ mk.rule('clean_noprof', '', 'rm -f $(OBJECTS) $(DEFAULT_TARGET) $(TARGET) $(GCMAPFILES) $(ASMFILES)')
+
#XXX: this conditional part is not tested at all
if self.config.translation.gcrootfinder == 'asmgcc':
if self.translator.platform.name == 'msvc':
@@ -507,7 +515,7 @@
else:
mk.definition('DEBUGFLAGS', '-O1 -g')
if self.translator.platform.name == 'msvc':
- mk.rule('debug_target', '$(DEFAULT_TARGET)', 'rem')
+ mk.rule('debug_target', '$(DEFAULT_TARGET) $(WTARGET)', 'rem')
else:
mk.rule('debug_target', '$(DEFAULT_TARGET)', '#')
mk.write()
From pypy.commits at gmail.com Wed Aug 3 07:34:45 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Wed, 03 Aug 2016 04:34:45 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: merged default,
added message to exception thrown in vecopt
Message-ID: <57a1d6d5.c3cb1c0a.fb5be.a50f@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86001:b6da37166dc2
Date: 2016-08-03 13:33 +0200
http://bitbucket.org/pypy/pypy/changeset/b6da37166dc2/
Log: merged default, added message to exception thrown in vecopt
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -109,3 +109,13 @@
.. branch: jitlog-exact-source-lines
Log exact line positions in debug merge points.
+
+.. branch: null_byte_after_str
+
+Allocate all RPython strings with one extra byte, normally unused.
+It is used to hold a final zero in case we need some ``char *``
+representation of the string, together with checks like ``not
+can_move()`` or object pinning. Main new thing that this allows:
+``ffi.from_buffer(string)`` in CFFI. Additionally, and most
+importantly, CFFI calls that take directly a string as argument don't
+copy the string any more---this is like CFFI on CPython.
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -157,11 +157,13 @@
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
try:
+ keepalives = [None] * len(args_w) # None or strings
for i in range(len(args_w)):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
w_obj = args_w[i]
argtype = self.fargs[i]
- if argtype.convert_argument_from_object(data, w_obj):
+ if argtype.convert_argument_from_object(data, w_obj,
+ keepalives, i):
# argtype is a pointer type, and w_obj a list/tuple/str
mustfree_max_plus_1 = i + 1
@@ -177,9 +179,13 @@
if isinstance(argtype, W_CTypePointer):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
flag = get_mustfree_flag(data)
+ raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
if flag == 1:
- raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
lltype.free(raw_cdata, flavor='raw')
+ elif flag >= 4:
+ value = keepalives[i]
+ assert value is not None
+ rffi.free_nonmovingbuffer(value, raw_cdata, chr(flag))
lltype.free(buffer, flavor='raw')
keepalive_until_here(args_w)
return w_res
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -83,7 +83,7 @@
raise oefmt(space.w_TypeError, "cannot initialize cdata '%s'",
self.name)
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
self.convert_from_object(cdata, w_ob)
return False
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -14,8 +14,8 @@
class W_CTypePtrOrArray(W_CType):
- _attrs_ = ['ctitem', 'can_cast_anything', 'length']
- _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length']
+ _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
+ _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
length = -1
def __init__(self, space, size, extra, extra_position, ctitem,
@@ -28,6 +28,9 @@
# - for functions, it is the return type
self.ctitem = ctitem
self.can_cast_anything = could_cast_anything and ctitem.cast_anything
+ self.accept_str = (self.can_cast_anything or
+ (ctitem.is_primitive_integer and
+ ctitem.size == rffi.sizeof(lltype.Char)))
def is_unichar_ptr_or_array(self):
return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar)
@@ -70,9 +73,7 @@
pass
else:
self._convert_array_from_listview(cdata, space.listview(w_ob))
- elif (self.can_cast_anything or
- (self.ctitem.is_primitive_integer and
- self.ctitem.size == rffi.sizeof(lltype.Char))):
+ elif self.accept_str:
if not space.isinstance_w(w_ob, space.w_str):
raise self._convert_error("str or list or tuple", w_ob)
s = space.str_w(w_ob)
@@ -260,8 +261,16 @@
else:
return lltype.nullptr(rffi.CCHARP.TO)
- def _prepare_pointer_call_argument(self, w_init, cdata):
+ def _prepare_pointer_call_argument(self, w_init, cdata, keepalives, i):
space = self.space
+ if self.accept_str and space.isinstance_w(w_init, space.w_str):
+ # special case to optimize strings passed to a "char *" argument
+ value = w_init.str_w(space)
+ keepalives[i] = value
+ buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
+ rffi.cast(rffi.CCHARPP, cdata)[0] = buf
+ return ord(buf_flag) # 4, 5 or 6
+ #
if (space.isinstance_w(w_init, space.w_list) or
space.isinstance_w(w_init, space.w_tuple)):
length = space.int_w(space.len(w_init))
@@ -297,10 +306,11 @@
rffi.cast(rffi.CCHARPP, cdata)[0] = result
return 1
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag
result = (not isinstance(w_ob, cdataobj.W_CData) and
- self._prepare_pointer_call_argument(w_ob, cdata))
+ self._prepare_pointer_call_argument(w_ob, cdata,
+ keepalives, i))
if result == 0:
self.convert_from_object(cdata, w_ob)
set_mustfree_flag(cdata, result)
diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py
--- a/pypy/module/_cffi_backend/ffi_obj.py
+++ b/pypy/module/_cffi_backend/ffi_obj.py
@@ -353,7 +353,7 @@
'array.array' or numpy arrays."""
#
w_ctchara = newtype._new_chara_type(self.space)
- return func.from_buffer(self.space, w_ctchara, w_python_buffer)
+ return func._from_buffer(self.space, w_ctchara, w_python_buffer)
@unwrap_spec(w_arg=W_CData)
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -1,7 +1,8 @@
from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw
-from rpython.rlib.objectmodel import keepalive_until_here
+from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated
+from rpython.rlib import jit
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
@@ -132,17 +133,66 @@
raise oefmt(space.w_TypeError,
"needs 'char[]', got '%s'", w_ctype.name)
#
+ return _from_buffer(space, w_ctype, w_x)
+
+def _from_buffer(space, w_ctype, w_x):
buf = _fetch_as_read_buffer(space, w_x)
- try:
- _cdata = buf.get_raw_address()
- except ValueError:
- raise oefmt(space.w_TypeError,
- "from_buffer() got a '%T' object, which supports the "
- "buffer interface but cannot be rendered as a plain "
- "raw address on PyPy", w_x)
+ if space.isinstance_w(w_x, space.w_str):
+ _cdata = get_raw_address_of_string(space, w_x)
+ else:
+ try:
+ _cdata = buf.get_raw_address()
+ except ValueError:
+ raise oefmt(space.w_TypeError,
+ "from_buffer() got a '%T' object, which supports the "
+ "buffer interface but cannot be rendered as a plain "
+ "raw address on PyPy", w_x)
#
return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x)
+# ____________________________________________________________
+
+class RawBytes(object):
+ def __init__(self, string):
+ self.ptr = rffi.str2charp(string, track_allocation=False)
+ def __del__(self):
+ rffi.free_charp(self.ptr, track_allocation=False)
+
+class RawBytesCache(object):
+ def __init__(self, space):
+ from pypy.interpreter.baseobjspace import W_Root
+ from rpython.rlib import rweakref
+ self.wdict = rweakref.RWeakKeyDictionary(W_Root, RawBytes)
+
+ at jit.dont_look_inside
+def get_raw_address_of_string(space, w_x):
+ """Special case for ffi.from_buffer(string). Returns a 'char *' that
+ is valid as long as the string object is alive. Two calls to
+ ffi.from_buffer(same_string) are guaranteed to return the same pointer.
+ """
+ from rpython.rtyper.annlowlevel import llstr
+ from rpython.rtyper.lltypesystem.rstr import STR
+ from rpython.rtyper.lltypesystem import llmemory
+ from rpython.rlib import rgc
+
+ cache = space.fromcache(RawBytesCache)
+ rawbytes = cache.wdict.get(w_x)
+ if rawbytes is None:
+ data = space.str_w(w_x)
+ if we_are_translated() and not rgc.can_move(data):
+ lldata = llstr(data)
+ data_start = (llmemory.cast_ptr_to_adr(lldata) +
+ rffi.offsetof(STR, 'chars') +
+ llmemory.itemoffsetof(STR.chars, 0))
+ data_start = rffi.cast(rffi.CCHARP, data_start)
+ data_start[len(data)] = '\x00' # write the final extra null
+ return data_start
+ rawbytes = RawBytes(data)
+ cache.wdict.set(w_x, rawbytes)
+ return rawbytes.ptr
+
+# ____________________________________________________________
+
def unsafe_escaping_ptr_for_ptr_or_array(w_cdata):
if not w_cdata.ctype.is_nonfunc_pointer_or_array:
diff --git a/pypy/module/_cffi_backend/parse_c_type.py b/pypy/module/_cffi_backend/parse_c_type.py
--- a/pypy/module/_cffi_backend/parse_c_type.py
+++ b/pypy/module/_cffi_backend/parse_c_type.py
@@ -97,11 +97,8 @@
[rffi.INT], rffi.CCHARP)
def parse_c_type(info, input):
- p_input = rffi.str2charp(input)
- try:
+ with rffi.scoped_view_charp(input) as p_input:
res = ll_parse_c_type(info, p_input)
- finally:
- rffi.free_charp(p_input)
return rffi.cast(lltype.Signed, res)
NULL_CTX = lltype.nullptr(PCTX.TO)
@@ -130,15 +127,13 @@
return rffi.getintfield(src_ctx, 'c_num_types')
def search_in_globals(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_globals(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_globals(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
def search_in_struct_unions(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_struct_unions(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_struct_unions(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -3330,13 +3330,18 @@
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
- py.test.raises(TypeError, from_buffer, BCharA, b"foo")
+ p1 = from_buffer(BCharA, b"foo")
+ assert p1 == from_buffer(BCharA, b"foo")
+ import gc; gc.collect()
+ assert p1 == from_buffer(BCharA, b"foo")
py.test.raises(TypeError, from_buffer, BCharA, u+"foo")
try:
from __builtin__ import buffer
except ImportError:
pass
else:
+ # from_buffer(buffer(b"foo")) does not work, because it's not
+ # implemented on pypy; only from_buffer(b"foo") works.
py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo"))
py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo"))
try:
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -401,21 +401,20 @@
_WriteFile, ERROR_NO_SYSTEM_RESOURCES)
from rpython.rlib import rwin32
- charp = rffi.str2charp(buf)
- written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
- flavor='raw')
- try:
- result = _WriteFile(
- self.handle, rffi.ptradd(charp, offset),
- size, written_ptr, rffi.NULL)
+ with rffi.scoped_view_charp(buf) as charp:
+ written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
+ flavor='raw')
+ try:
+ result = _WriteFile(
+ self.handle, rffi.ptradd(charp, offset),
+ size, written_ptr, rffi.NULL)
- if (result == 0 and
- rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
- raise oefmt(space.w_ValueError,
- "Cannot send %d bytes over connection", size)
- finally:
- rffi.free_charp(charp)
- lltype.free(written_ptr, flavor='raw')
+ if (result == 0 and
+ rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
+ raise oefmt(space.w_ValueError,
+ "Cannot send %d bytes over connection", size)
+ finally:
+ lltype.free(written_ptr, flavor='raw')
def do_recv_string(self, space, buflength, maxlength):
from pypy.module._multiprocessing.interp_win32 import (
diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py
--- a/pypy/module/_rawffi/alt/interp_funcptr.py
+++ b/pypy/module/_rawffi/alt/interp_funcptr.py
@@ -20,7 +20,8 @@
def _getfunc(space, CDLL, w_name, w_argtypes, w_restype):
argtypes_w, argtypes, w_restype, restype = unpack_argtypes(
space, w_argtypes, w_restype)
- if space.isinstance_w(w_name, space.w_str):
+ if (space.isinstance_w(w_name, space.w_str) or
+ space.isinstance_w(w_name, space.w_unicode)):
name = space.str_w(w_name)
try:
func = CDLL.cdll.getpointer(name, argtypes, restype,
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -135,7 +135,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
NPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
# set both server and client callbacks, because the context
@@ -147,7 +147,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def advertiseNPN_cb(s, data_ptr, len_ptr, args):
@@ -181,7 +181,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
ALPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
with rffi.scoped_str2charp(protos) as protos_buf:
@@ -193,7 +193,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def selectALPN_cb(s, out_ptr, outlen_ptr, client, client_len, args):
@@ -228,7 +228,7 @@
Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
bound on the entropy contained in string."""
- with rffi.scoped_str2charp(string) as buf:
+ with rffi.scoped_nonmovingbuffer(string) as buf:
libssl_RAND_add(buf, len(string), entropy)
def RAND_status(space):
diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py
--- a/pypy/module/cppyy/capi/builtin_capi.py
+++ b/pypy/module/cppyy/capi/builtin_capi.py
@@ -537,9 +537,8 @@
releasegil=ts_helper,
compilation_info=backend.eci)
def c_charp2stdstring(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2stdstring(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2stdstring(charp)
return result
_c_stdstring2stdstring = rffi.llexternal(
"cppyy_stdstring2stdstring",
diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py
--- a/pypy/module/cppyy/capi/cint_capi.py
+++ b/pypy/module/cppyy/capi/cint_capi.py
@@ -82,9 +82,8 @@
releasegil=ts_helper,
compilation_info=eci)
def c_charp2TString(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2TString(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2TString(charp)
return result
_c_TString2TString = rffi.llexternal(
"cppyy_TString2TString",
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -65,6 +65,7 @@
else: # only other use is string
n = len(obj._string)
assert raw_string == rffi.cast(rffi.CCHARP, 0)
+ # XXX could use rffi.get_nonmovingbuffer_final_null()
raw_string = rffi.str2charp(obj._string)
data = rffi.cast(rffi.CCHARPP, data)
data[0] = raw_string
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -96,7 +96,8 @@
raise oefmt(space.w_ValueError,
"bytes_attach called on object with ob_size %d but trying to store %d",
py_str.c_ob_size, len(s))
- rffi.c_memcpy(py_str.c_ob_sval, rffi.str2charp(s), len(s))
+ with rffi.scoped_nonmovingbuffer(s) as s_ptr:
+ rffi.c_memcpy(py_str.c_ob_sval, s_ptr, len(s))
py_str.c_ob_sval[len(s)] = '\0'
py_str.c_ob_shash = space.hash_w(w_obj)
py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
@@ -133,6 +133,12 @@
# You cannot assign character format codes as restype any longer
raises(TypeError, setattr, f, "restype", "i")
+ def test_unicode_function_name(self):
+ f = dll[u'_testfunc_i_bhilfd']
+ f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
+ f.restype = c_int
+ result = f(1, 2, 3, 4, 5.0, 6.0)
+ assert result == 21
def test_truncate_python_longs(self):
f = dll._testfunc_i_bhilfd
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-# hypothesis is used for test generation on untranslated jit tests
+# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
--- a/rpython/jit/backend/arm/opassembler.py
+++ b/rpython/jit/backend/arm/opassembler.py
@@ -883,6 +883,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self._gen_address(resloc, baseloc, ofsloc, scale, ofs_items)
diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py
--- a/rpython/jit/backend/llsupport/descr.py
+++ b/rpython/jit/backend/llsupport/descr.py
@@ -280,7 +280,7 @@
concrete_type = '\x00'
def __init__(self, basesize, itemsize, lendescr, flag, is_pure=False, concrete_type='\x00'):
- self.basesize = basesize
+ self.basesize = basesize # this includes +1 for STR
self.itemsize = itemsize
self.lendescr = lendescr # or None, if no length
self.flag = flag
@@ -676,7 +676,7 @@
def unpack_arraydescr(arraydescr):
assert isinstance(arraydescr, ArrayDescr)
- ofs = arraydescr.basesize
+ ofs = arraydescr.basesize # this includes +1 for STR
size = arraydescr.itemsize
sign = arraydescr.is_item_signed()
return size, ofs, sign
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -265,6 +265,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1),
itemsize, itemsize, basesize, NOT_SIGNED)
elif opnum == rop.UNICODEGETITEM:
@@ -276,6 +277,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2),
itemsize, itemsize, basesize)
elif opnum == rop.UNICODESETITEM:
diff --git a/rpython/jit/backend/llsupport/symbolic.py b/rpython/jit/backend/llsupport/symbolic.py
--- a/rpython/jit/backend/llsupport/symbolic.py
+++ b/rpython/jit/backend/llsupport/symbolic.py
@@ -29,7 +29,7 @@
def get_array_token(T, translate_support_code):
# T can be an array or a var-sized structure
if translate_support_code:
- basesize = llmemory.sizeof(T, 0)
+ basesize = llmemory.sizeof(T, 0) # this includes +1 for STR
if isinstance(T, lltype.Struct):
SUBARRAY = getattr(T, T._arrayfld)
itemsize = llmemory.sizeof(SUBARRAY.OF)
@@ -57,6 +57,7 @@
assert carray.length.size == WORD
ofs_length = before_array_part + carray.length.offset
basesize = before_array_part + carray.items.offset
+ basesize += T._hints.get('extra_item_after_alloc', 0) # +1 for STR
carrayitem = ll2ctypes.get_ctypes_type(T.OF)
itemsize = ctypes.sizeof(carrayitem)
return basesize, itemsize, ofs_length
diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py
--- a/rpython/jit/backend/llsupport/test/test_descr.py
+++ b/rpython/jit/backend/llsupport/test/test_descr.py
@@ -435,8 +435,10 @@
def test_bytearray_descr():
c0 = GcCache(False)
descr = get_array_descr(c0, rstr.STR) # for bytearray
+ # note that we get a basesize that has 1 extra byte for the final null char
+ # (only for STR)
assert descr.flag == FLAG_UNSIGNED
- assert descr.basesize == struct.calcsize("PP") # hash, length
+ assert descr.basesize == struct.calcsize("PP") + 1 # hash, length, extra
assert descr.lendescr.offset == struct.calcsize("P") # hash
assert not descr.is_array_of_pointers()
diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py
--- a/rpython/jit/backend/llsupport/test/test_rewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_rewrite.py
@@ -647,6 +647,9 @@
""")
def test_rewrite_assembler_newstr_newunicode(self):
+ # note: strdescr.basesize already contains the extra final character,
+ # so that's why newstr(14) is rounded up to 'basesize+15' and not
+ # 'basesize+16'.
self.check_rewrite("""
[i2]
p0 = newstr(14)
@@ -657,12 +660,12 @@
""", """
[i2]
p0 = call_malloc_nursery( \
- %(strdescr.basesize + 16 * strdescr.itemsize + \
+ %(strdescr.basesize + 15 * strdescr.itemsize + \
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s)
gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s)
gc_store(p0, 0, 0, %(strhashdescr.field_size)s)
- p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
+ p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 15 * strdescr.itemsize)d)
gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s)
gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s)
gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s)
@@ -1240,14 +1243,14 @@
# 'i3 = gc_load_i(p0,i5,%(unicodedescr.itemsize)d)'],
[True, (4,), 'i3 = strgetitem(p0,i1)' '->'
'i3 = gc_load_indexed_i(p0,i1,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
#[False, (4,), 'i3 = strgetitem(p0,i1)' '->'
- # 'i5 = int_add(i1, %(strdescr.basesize)d);'
+ # 'i5 = int_add(i1, %(strdescr.basesize-1)d);'
# 'i3 = gc_load_i(p0,i5,1)'],
## setitem str/unicode
[True, (4,), 'i3 = strsetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
[True, (2,4), 'i3 = unicodesetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,'
'%(unicodedescr.itemsize)d,'
diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py
--- a/rpython/jit/backend/llsupport/test/ztranslation_test.py
+++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py
@@ -3,7 +3,7 @@
from rpython.rlib.jit import JitDriver, unroll_parameters, set_param
from rpython.rlib.jit import PARAMETERS, dont_look_inside
from rpython.rlib.jit import promote, _get_virtualizable_token
-from rpython.rlib import jit_hooks, rposix
+from rpython.rlib import jit_hooks, rposix, rgc
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField
from rpython.jit.backend.detect_cpu import getcpuclass
@@ -11,7 +11,7 @@
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.config.config import ConfigError
from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rlib.rjitlog import rjitlog as jl
@@ -29,6 +29,7 @@
# - floats neg and abs
# - cast_int_to_float
# - llexternal with macro=True
+ # - extra place for the zero after STR instances
class BasicFrame(object):
_virtualizable_ = ['i']
@@ -56,7 +57,7 @@
return ("/home.py",0,0)
jitdriver = JitDriver(greens = [],
- reds = ['total', 'frame', 'j'],
+ reds = ['total', 'frame', 'prev_s', 'j'],
virtualizables = ['frame'],
get_location = get_location)
def f(i, j):
@@ -68,9 +69,12 @@
total = 0
frame = Frame(i)
j = float(j)
+ prev_s = rstr.mallocstr(16)
while frame.i > 3:
- jitdriver.can_enter_jit(frame=frame, total=total, j=j)
- jitdriver.jit_merge_point(frame=frame, total=total, j=j)
+ jitdriver.can_enter_jit(frame=frame, total=total, j=j,
+ prev_s=prev_s)
+ jitdriver.jit_merge_point(frame=frame, total=total, j=j,
+ prev_s=prev_s)
_get_virtualizable_token(frame)
total += frame.i
if frame.i >= 20:
@@ -82,6 +86,11 @@
k = myabs1(myabs2(j))
if k - abs(j): raise ValueError
if k - abs(-j): raise ValueError
+ s = rstr.mallocstr(16)
+ rgc.ll_write_final_null_char(s)
+ rgc.ll_write_final_null_char(prev_s)
+ if (frame.i & 3) == 0:
+ prev_s = s
return chr(total % 253)
#
class Virt2(object):
diff --git a/rpython/jit/backend/llsupport/vector_ext.py b/rpython/jit/backend/llsupport/vector_ext.py
--- a/rpython/jit/backend/llsupport/vector_ext.py
+++ b/rpython/jit/backend/llsupport/vector_ext.py
@@ -192,9 +192,11 @@
continue
curvecinfo = forwarded_vecinfo(arg)
if curvecinfo.bytesize != bytesize:
- raise NotAVectorizeableLoop()
+ raise NotAVectorizeableLoop("op match size first type failed %d != %d" % \
(curvecinfo.bytesize, bytesize))
if curvecinfo.datatype != datatype:
- raise NotAVectorizeableLoop()
+ raise NotAVectorizeableLoop("op match size first type failed (datatype). %s != %s" % \
(curvecinfo.datatype, datatype))
return None
TR_ANY = TypeRestrict()
diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py
--- a/rpython/jit/backend/ppc/opassembler.py
+++ b/rpython/jit/backend/ppc/opassembler.py
@@ -995,6 +995,7 @@
basesize, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
scale = 0
self._emit_load_for_copycontent(r.r0, src_ptr_loc, src_ofs_loc, scale)
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1685,25 +1685,6 @@
dest_addr = AddressLoc(base_loc, ofs_loc, scale, offset_loc.value)
self.save_into_mem(dest_addr, value_loc, size_loc)
- def genop_discard_strsetitem(self, op, arglocs):
- base_loc, ofs_loc, val_loc = arglocs
- basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR,
- self.cpu.translate_support_code)
- assert itemsize == 1
- dest_addr = AddressLoc(base_loc, ofs_loc, 0, basesize)
- self.mc.MOV8(dest_addr, val_loc.lowest8bits())
-
- def genop_discard_unicodesetitem(self, op, arglocs):
- base_loc, ofs_loc, val_loc = arglocs
- basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
- self.cpu.translate_support_code)
- if itemsize == 4:
- self.mc.MOV32(AddressLoc(base_loc, ofs_loc, 2, basesize), val_loc)
- elif itemsize == 2:
- self.mc.MOV16(AddressLoc(base_loc, ofs_loc, 1, basesize), val_loc)
- else:
- assert 0, itemsize
-
# genop_discard_setfield_raw = genop_discard_setfield_gc
def genop_math_read_timestamp(self, op, arglocs, resloc):
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -1219,6 +1219,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self.assembler.load_effective_addr(ofsloc, ofs_items, scale,
resloc, baseloc)
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -991,6 +991,7 @@
basesize, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
scale = 0
# src and src_len are tmp registers
diff --git a/rpython/jit/metainterp/jitexc.py b/rpython/jit/metainterp/jitexc.py
--- a/rpython/jit/metainterp/jitexc.py
+++ b/rpython/jit/metainterp/jitexc.py
@@ -62,8 +62,10 @@
self.red_int, self.red_ref, self.red_float)
class NotAVectorizeableLoop(JitException):
+ def __init__(self, msg=""):
+ self.msg = msg
def __str__(self):
- return 'NotAVectorizeableLoop()'
+ return 'NotAVectorizeableLoop(%s)' % self.msg
class NotAProfitableLoop(JitException):
def __str__(self):
diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
--- a/rpython/jit/metainterp/optimizeopt/vector.py
+++ b/rpython/jit/metainterp/optimizeopt/vector.py
@@ -146,12 +146,14 @@
#
info.label_op = loop.label
return info, loop.finaloplist(jitcell_token=jitcell_token, reset_label_token=False)
- except NotAVectorizeableLoop:
+ except NotAVectorizeableLoop as e:
debug_stop("vec-opt-loop")
+ debug_print("failed to vectorize loop. reason: %s" % str(e))
# vectorization is not possible
return loop_info, version.loop.finaloplist()
except NotAProfitableLoop:
debug_stop("vec-opt-loop")
+ debug_print("failed to vectorize loop, cost model indicated it is not profitable")
# cost model says to skip this loop
return loop_info, version.loop.finaloplist()
except Exception as e:
@@ -231,7 +233,8 @@
if vsize == 0 or byte_count == 0 or loop.label.getopnum() != rop.LABEL:
# stop, there is no chance to vectorize this trace
# we cannot optimize normal traces (if there is no label)
- raise NotAVectorizeableLoop()
+ raise NotAVectorizeableLoop("vsize %d byte_count %d not label? %d" % \
+ (vsize, byte_count, loop.label.getopnum() != rop.LABEL))
# find index guards and move to the earliest position
graph = self.analyse_index_calculations(loop)
@@ -253,7 +256,7 @@
state = VecScheduleState(graph, self.packset, self.cpu, costmodel)
self.schedule(state)
if not state.profitable():
- raise NotAProfitableLoop()
+ raise NotAProfitableLoop
return graph.index_vars
def unroll_loop_iterations(self, loop, unroll_count):
@@ -429,7 +432,7 @@
intersecting edges.
"""
if len(self.packset.packs) == 0:
- raise NotAVectorizeableLoop()
+ raise NotAVectorizeableLoop("packset is empty")
i = 0
j = 0
end_ij = len(self.packset.packs)
@@ -661,7 +664,7 @@
if forward and origin_pack.is_accumulating():
# in this case the splitted accumulator must
# be combined. This case is not supported
- raise NotAVectorizeableLoop()
+ raise NotAVectorizeableLoop("splitted accum must be flushed here (not supported)")
#
if self.contains_pair(lnode, rnode):
return None
diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py
--- a/rpython/jit/metainterp/test/test_virtualizable.py
+++ b/rpython/jit/metainterp/test/test_virtualizable.py
@@ -1381,7 +1381,7 @@
return result
def indirection(arg):
- return interp(arg)
+ return interp(arg) + 1
def run_interp(n):
f = hint(Frame(n), access_directly=True)
diff --git a/rpython/memory/gcheader.py b/rpython/memory/gcheader.py
--- a/rpython/memory/gcheader.py
+++ b/rpython/memory/gcheader.py
@@ -11,7 +11,21 @@
def __init__(self, HDR):
"""NOT_RPYTHON"""
self.HDR = HDR
- self.obj2header = weakref.WeakKeyDictionary()
+ #
+ # The following used to be a weakref.WeakKeyDictionary(), but
+ # the problem is that if you have a gcobj which has already a
+ # weakref cached on it and the hash already cached in that
+ # weakref, and later the hash of the gcobj changes (because it
+ # is ll2ctypes-ified), then that gcobj cannot be used as a key
+ # in a WeakKeyDictionary any more: from this point on,
+ # 'ref(gcobj)' and 'ref(gcobj, callback)' return two objects
+ # with different hashes... and so e.g. the sequence of
+ # operations 'obj2header[x]=y; assert x in obj2header' fails.
+ #
+ # Instead, just use a regular dictionary and hope that not too
+ # many objects would be reclaimed in a given GCHeaderBuilder
+ # instance.
+ self.obj2header = {}
self.size_gc_header = llmemory.GCHeaderOffset(self)
def header_of_object(self, gcptr):
diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py
--- a/rpython/rlib/objectmodel.py
+++ b/rpython/rlib/objectmodel.py
@@ -281,6 +281,10 @@
return lltype.Signed
malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0)
+_translated_to_c = CDefinedIntSymbolic('1 /*_translated_to_c*/', default=0)
+
+def we_are_translated_to_c():
+ return we_are_translated() and _translated_to_c
# ____________________________________________________________
diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py
--- a/rpython/rlib/rdtoa.py
+++ b/rpython/rlib/rdtoa.py
@@ -56,22 +56,24 @@
raise MemoryError
end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
try:
- ll_input = rffi.str2charp(input)
+ # note: don't use the class scoped_view_charp here; it
+ # breaks some tests because this function is used by the GC
+ ll_input, flag = rffi.get_nonmovingbuffer_final_null(input)
try:
result = dg_strtod(ll_input, end_ptr)
endpos = (rffi.cast(lltype.Signed, end_ptr[0]) -
rffi.cast(lltype.Signed, ll_input))
-
- if endpos == 0 or endpos < len(input):
- raise ValueError("invalid input at position %d" % (endpos,))
-
- return result
finally:
- rffi.free_charp(ll_input)
+ rffi.free_nonmovingbuffer(input, ll_input, flag)
finally:
lltype.free(end_ptr, flavor='raw')
+ if endpos == 0 or endpos < len(input):
+ raise ValueError("invalid input at position %d" % (endpos,))
+
+ return result
+
lower_special_strings = ['inf', '+inf', '-inf', 'nan']
upper_special_strings = ['INF', '+INF', '-INF', 'NAN']
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -1268,3 +1268,26 @@
ptr = lltype.direct_arrayitems(array)
# ptr is a Ptr(FixedSizeArray(Char, 1)). Cast it to a rffi.CCHARP
return rffi.cast(rffi.CCHARP, ptr)
+
+ at jit.dont_look_inside
+ at no_collect
+ at specialize.ll()
+def ll_write_final_null_char(s):
+ """'s' is a low-level STR; writes a terminating NULL character after
+ the other characters in 's'. Warning, this only works because of
+ the 'extra_item_after_alloc' hack inside the definition of STR.
+ """
+ from rpython.rtyper.lltypesystem import rffi
+ PSTR = lltype.typeOf(s)
+ assert has_final_null_char(PSTR) == 1
+ n = llmemory.offsetof(PSTR.TO, 'chars')
+ n += llmemory.itemoffsetof(PSTR.TO.chars, 0)
+ n = llmemory.raw_malloc_usage(n)
+ n += len(s.chars)
+ # no GC operation from here!
+ ptr = rffi.cast(rffi.CCHARP, s)
+ ptr[n] = '\x00'
+
+ at specialize.memo()
+def has_final_null_char(PSTR):
+ return PSTR.TO.chars._hints.get('extra_item_after_alloc', 0)
diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py
--- a/rpython/rtyper/lltypesystem/ll2ctypes.py
+++ b/rpython/rtyper/lltypesystem/ll2ctypes.py
@@ -250,7 +250,9 @@
if not A._hints.get('nolength'):
_fields_ = [('length', lentype),
- ('items', max_n * ctypes_item)]
+ ('items',
+ (max_n + A._hints.get('extra_item_after_alloc', 0))
+ * ctypes_item)]
else:
_fields_ = [('items', max_n * ctypes_item)]
@@ -695,6 +697,9 @@
# we have no clue, so we allow whatever index
return 0, maxint
+ def shrinklength(self, newlength):
+ raise NotImplementedError
+
def getitem(self, index, uninitialized_ok=False):
res = self._storage.contents._getitem(index, boundscheck=False)
if isinstance(self._TYPE.OF, lltype.ContainerType):
diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py
--- a/rpython/rtyper/lltypesystem/llmemory.py
+++ b/rpython/rtyper/lltypesystem/llmemory.py
@@ -304,8 +304,15 @@
return cast_ptr_to_adr(p)
def raw_memcopy(self, srcadr, dstadr):
- # should really copy the length field, but we can't
- pass
+ # copy the length field, if we can
+ srclen = srcadr.ptr._obj.getlength()
+ dstlen = dstadr.ptr._obj.getlength()
+ if dstlen != srclen:
+ assert dstlen > srclen, "can't increase the length"
+ # a decrease in length occurs in the GC tests when copying a STR:
+ # the copy is initially allocated with really one extra char,
+ # the 'extra_item_after_alloc', and must be fixed.
+ dstadr.ptr._obj.shrinklength(srclen)
class ArrayLengthOffset(AddressOffset):
@@ -390,11 +397,23 @@
else:
raise Exception("don't know how to take the size of a %r"%TYPE)
+ at specialize.memo()
+def extra_item_after_alloc(ARRAY):
+ assert isinstance(ARRAY, lltype.Array)
+ return ARRAY._hints.get('extra_item_after_alloc', 0)
+
@specialize.arg(0)
def sizeof(TYPE, n=None):
+ """Return the symbolic size of TYPE.
+ For a Struct with no varsized part, it must be called with n=None.
+ For an Array or a Struct with a varsized part, it is the number of items.
+ There is a special case to return 1 more than requested if the array
+ has the hint 'extra_item_after_alloc' set to 1.
+ """
if n is None:
return _sizeof_none(TYPE)
elif isinstance(TYPE, lltype.Array):
+ n += extra_item_after_alloc(TYPE)
return itemoffsetof(TYPE) + _sizeof_none(TYPE.OF) * n
else:
return _sizeof_int(TYPE, n)
@@ -1036,7 +1055,7 @@
_reccopy(subsrc, subdst)
else:
# this is a hack XXX de-hack this
- llvalue = source._obj.getitem(i, uninitialized_ok=True)
+ llvalue = source._obj.getitem(i, uninitialized_ok=2)
if not isinstance(llvalue, lltype._uninitialized):
dest._obj.setitem(i, llvalue)
elif isinstance(T, lltype.Struct):
diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py
--- a/rpython/rtyper/lltypesystem/lltype.py
+++ b/rpython/rtyper/lltypesystem/lltype.py
@@ -1926,14 +1926,29 @@
return 0, stop
def getitem(self, index, uninitialized_ok=False):
- v = self.items[index]
+ try:
+ v = self.items[index]
+ except IndexError:
+ if (index == len(self.items) and uninitialized_ok == 2 and
+ self._TYPE._hints.get('extra_item_after_alloc')):
+ # special case: reading the extra final char returns
+ # an uninitialized, if 'uninitialized_ok==2'
+ return _uninitialized(self._TYPE.OF)
+ raise
if isinstance(v, _uninitialized) and not uninitialized_ok:
raise UninitializedMemoryAccess("%r[%s]"%(self, index))
return v
def setitem(self, index, value):
assert typeOf(value) == self._TYPE.OF
- self.items[index] = value
+ try:
+ self.items[index] = value
+ except IndexError:
+ if (index == len(self.items) and value == '\x00' and
+ self._TYPE._hints.get('extra_item_after_alloc')):
+ # special case: writing NULL to the extra final char
+ return
+ raise
assert not '__dict__' in dir(_array)
assert not '__dict__' in dir(_struct)
diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py
--- a/rpython/rtyper/lltypesystem/rffi.py
+++ b/rpython/rtyper/lltypesystem/rffi.py
@@ -15,7 +15,7 @@
from rpython.rtyper.tool.rfficache import platform, sizeof_c_type
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.annlowlevel import llhelper
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, we_are_translated_to_c
from rpython.rlib.rstring import StringBuilder, UnicodeBuilder, assert_str0
from rpython.rlib import jit
from rpython.rtyper.lltypesystem import llmemory
@@ -232,40 +232,36 @@
call_external_function = jit.dont_look_inside(
call_external_function)
+ def _oops():
+ raise AssertionError("can't pass (any more) a unicode string"
+ " directly to a VOIDP argument")
+ _oops._annspecialcase_ = 'specialize:memo'
+
unrolling_arg_tps = unrolling_iterable(enumerate(args))
def wrapper(*args):
real_args = ()
+ # XXX 'to_free' leaks if an allocation fails with MemoryError
+ # and was not the first in this function
to_free = ()
for i, TARGET in unrolling_arg_tps:
arg = args[i]
- freeme = None
- if TARGET == CCHARP:
+ if TARGET == CCHARP or TARGET is VOIDP:
if arg is None:
arg = lltype.nullptr(CCHARP.TO) # None => (char*)NULL
- freeme = arg
+ to_free = to_free + (arg, '\x04')
elif isinstance(arg, str):
- arg = str2charp(arg)
- # XXX leaks if a str2charp() fails with MemoryError
- # and was not the first in this function
- freeme = arg
+ tup = get_nonmovingbuffer_final_null(arg)
+ to_free = to_free + tup
+ arg = tup[0]
+ elif isinstance(arg, unicode):
+ _oops()
elif TARGET == CWCHARP:
if arg is None:
arg = lltype.nullptr(CWCHARP.TO) # None => (wchar_t*)NULL
- freeme = arg
+ to_free = to_free + (arg,)
elif isinstance(arg, unicode):
arg = unicode2wcharp(arg)
- # XXX leaks if a unicode2wcharp() fails with MemoryError
- # and was not the first in this function
- freeme = arg
- elif TARGET is VOIDP:
- if arg is None:
- arg = lltype.nullptr(VOIDP.TO)
- elif isinstance(arg, str):
- arg = str2charp(arg)
- freeme = arg
- elif isinstance(arg, unicode):
- arg = unicode2wcharp(arg)
- freeme = arg
+ to_free = to_free + (arg,)
elif _isfunctype(TARGET) and not _isllptr(arg):
# XXX pass additional arguments
use_gil = invoke_around_handlers
@@ -283,11 +279,22 @@
or TARGET is lltype.Bool)):
arg = cast(TARGET, arg)
real_args = real_args + (arg,)
- to_free = to_free + (freeme,)
res = call_external_function(*real_args)
for i, TARGET in unrolling_arg_tps:
- if to_free[i]:
- lltype.free(to_free[i], flavor='raw')
+ arg = args[i]
+ if TARGET == CCHARP or TARGET is VOIDP:
+ if arg is None:
+ to_free = to_free[2:]
+ elif isinstance(arg, str):
+ free_nonmovingbuffer(arg, to_free[0], to_free[1])
+ to_free = to_free[2:]
+ elif TARGET == CWCHARP:
+ if arg is None:
+ to_free = to_free[1:]
+ elif isinstance(arg, unicode):
+ free_wcharp(to_free[0])
+ to_free = to_free[1:]
+ assert len(to_free) == 0
if rarithmetic.r_int is not r_int:
if result is INT:
return cast(lltype.Signed, res)
@@ -816,52 +823,69 @@
string is already nonmovable or could be pinned. Must be followed by a
free_nonmovingbuffer call.
- First bool returned indicates if 'data' was pinned. Second bool returned
- indicates if we did a raw alloc because pinning failed. Both bools
- should never be true at the same time.
+ Also returns a char:
+ * \4: no pinning, returned pointer is inside 'data' which is nonmovable
+ * \5: 'data' was pinned, returned pointer is inside
+ * \6: pinning failed, returned pointer is raw malloced
+
+ For strings (not unicodes), the len()th character of the resulting
+ raw buffer is available, but not initialized. Use
+ get_nonmovingbuffer_final_null() instead of get_nonmovingbuffer()
+ to get a regular null-terminated "char *".
"""
lldata = llstrtype(data)
count = len(data)
- pinned = False
- if rgc.can_move(data):
- if rgc.pin(data):
- pinned = True
+ if we_are_translated_to_c() and not rgc.can_move(data):
+ flag = '\x04'
+ else:
+ if we_are_translated_to_c() and rgc.pin(data):
+ flag = '\x05'
else:
- buf = lltype.malloc(TYPEP.TO, count, flavor='raw')
+ buf = lltype.malloc(TYPEP.TO, count + (TYPEP is CCHARP),
+ flavor='raw')
copy_string_to_raw(lldata, buf, 0, count)
- return buf, pinned, True
+ return buf, '\x06'
# ^^^ raw malloc used to get a nonmovable copy
#
- # following code is executed if:
+ # following code is executed after we're translated to C, if:
# - rgc.can_move(data) and rgc.pin(data) both returned true
# - rgc.can_move(data) returned false
data_start = cast_ptr_to_adr(lldata) + \
offsetof(STRTYPE, 'chars') + itemoffsetof(STRTYPE.chars, 0)
- return cast(TYPEP, data_start), pinned, False
+ return cast(TYPEP, data_start), flag
# ^^^ already nonmovable. Therefore it's not raw allocated nor
# pinned.
get_nonmovingbuffer._always_inline_ = 'try' # get rid of the returned tuple
get_nonmovingbuffer._annenforceargs_ = [strtype]
- # (str, char*, bool, bool) -> None
+ @jit.dont_look_inside
+ def get_nonmovingbuffer_final_null(data):
+ tup = get_nonmovingbuffer(data)
+ buf, flag = tup
+ buf[len(data)] = lastchar
+ return tup
+ get_nonmovingbuffer_final_null._always_inline_ = 'try'
+ get_nonmovingbuffer_final_null._annenforceargs_ = [strtype]
+
+ # (str, char*, char) -> None
# Can't inline this because of the raw address manipulation.
@jit.dont_look_inside
- def free_nonmovingbuffer(data, buf, is_pinned, is_raw):
+ def free_nonmovingbuffer(data, buf, flag):
"""
- Keep 'data' alive and unpin it if it was pinned ('is_pinned' is true).
- Otherwise free the non-moving copy ('is_raw' is true).
+ Keep 'data' alive and unpin it if it was pinned (flag==\5).
+ Otherwise free the non-moving copy (flag==\6).
"""
- if is_pinned:
+ if flag == '\x05':
rgc.unpin(data)
- if is_raw:
+ if flag == '\x06':
lltype.free(buf, flavor='raw')
- # if is_pinned and is_raw are false: data was already nonmovable,
+ # if flag == '\x04': data was already nonmovable,
# we have nothing to clean up
keepalive_until_here(data)
- free_nonmovingbuffer._annenforceargs_ = [strtype, None, bool, bool]
+ free_nonmovingbuffer._annenforceargs_ = [strtype, None, None]
# int -> (char*, str, int)
# Can't inline this because of the raw address manipulation.
@@ -947,18 +971,19 @@
return (str2charp, free_charp, charp2str,
get_nonmovingbuffer, free_nonmovingbuffer,
+ get_nonmovingbuffer_final_null,
alloc_buffer, str_from_buffer, keep_buffer_alive_until_here,
charp2strn, charpsize2str, str2chararray, str2rawmem,
)
(str2charp, free_charp, charp2str,
- get_nonmovingbuffer, free_nonmovingbuffer,
+ get_nonmovingbuffer, free_nonmovingbuffer, get_nonmovingbuffer_final_null,
alloc_buffer, str_from_buffer, keep_buffer_alive_until_here,
charp2strn, charpsize2str, str2chararray, str2rawmem,
) = make_string_mappings(str)
(unicode2wcharp, free_wcharp, wcharp2unicode,
- get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer,
+ get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer, __not_usable,
alloc_unicodebuffer, unicode_from_buffer, keep_unicodebuffer_alive_until_here,
wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, unicode2rawmem,
) = make_string_mappings(unicode)
@@ -1194,10 +1219,28 @@
def __init__(self, data):
self.data = data
def __enter__(self):
- self.buf, self.pinned, self.is_raw = get_nonmovingbuffer(self.data)
+ self.buf, self.flag = get_nonmovingbuffer(self.data)
return self.buf
def __exit__(self, *args):
- free_nonmovingbuffer(self.data, self.buf, self.pinned, self.is_raw)
+ free_nonmovingbuffer(self.data, self.buf, self.flag)
+ __init__._always_inline_ = 'try'
+ __enter__._always_inline_ = 'try'
+ __exit__._always_inline_ = 'try'
+
+class scoped_view_charp:
+ """Returns a 'char *' that (tries to) point inside the given RPython
+ string (which must not be None). You can replace scoped_str2charp()
+ with scoped_view_charp() in all places that guarantee that the
+ content of the 'char[]' array will not be modified.
+ """
+ def __init__(self, data):
+ self.data = data
+ __init__._annenforceargs_ = [None, annmodel.SomeString(can_be_None=False)]
+ def __enter__(self):
+ self.buf, self.flag = get_nonmovingbuffer_final_null(self.data)
+ return self.buf
+ def __exit__(self, *args):
+ free_nonmovingbuffer(self.data, self.buf, self.flag)
__init__._always_inline_ = 'try'
__enter__._always_inline_ = 'try'
__exit__._always_inline_ = 'try'
@@ -1206,10 +1249,10 @@
def __init__(self, data):
self.data = data
def __enter__(self):
- self.buf, self.pinned, self.is_raw = get_nonmoving_unicodebuffer(self.data)
+ self.buf, self.flag = get_nonmoving_unicodebuffer(self.data)
return self.buf
def __exit__(self, *args):
- free_nonmoving_unicodebuffer(self.data, self.buf, self.pinned, self.is_raw)
+ free_nonmoving_unicodebuffer(self.data, self.buf, self.flag)
__init__._always_inline_ = 'try'
__enter__._always_inline_ = 'try'
__exit__._always_inline_ = 'try'
diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py
--- a/rpython/rtyper/lltypesystem/rstr.py
+++ b/rpython/rtyper/lltypesystem/rstr.py
@@ -1238,7 +1238,8 @@
# ____________________________________________________________
STR.become(GcStruct('rpy_string', ('hash', Signed),
- ('chars', Array(Char, hints={'immutable': True})),
+ ('chars', Array(Char, hints={'immutable': True,
+ 'extra_item_after_alloc': 1})),
adtmeths={'malloc' : staticAdtMethod(mallocstr),
'empty' : staticAdtMethod(emptystrfun),
'copy_contents' : staticAdtMethod(copy_string_contents),
diff --git a/rpython/rtyper/lltypesystem/test/test_rffi.py b/rpython/rtyper/lltypesystem/test/test_rffi.py
--- a/rpython/rtyper/lltypesystem/test/test_rffi.py
+++ b/rpython/rtyper/lltypesystem/test/test_rffi.py
@@ -516,7 +516,7 @@
def test_nonmovingbuffer(self):
d = 'some cool data that should not move'
def f():
- buf, is_pinned, is_raw = get_nonmovingbuffer(d)
+ buf, flag = get_nonmovingbuffer(d)
try:
counter = 0
for i in range(len(d)):
@@ -524,7 +524,7 @@
counter += 1
return counter
finally:
- free_nonmovingbuffer(d, buf, is_pinned, is_raw)
+ free_nonmovingbuffer(d, buf, flag)
assert f() == len(d)
fn = self.compile(f, [], gcpolicy='ref')
assert fn() == len(d)
@@ -534,13 +534,13 @@
def f():
counter = 0
for n in range(32):
- buf, is_pinned, is_raw = get_nonmovingbuffer(d)
+ buf, flag = get_nonmovingbuffer(d)
try:
for i in range(len(d)):
if buf[i] == d[i]:
counter += 1
finally:
- free_nonmovingbuffer(d, buf, is_pinned, is_raw)
+ free_nonmovingbuffer(d, buf, flag)
return counter
fn = self.compile(f, [], gcpolicy='semispace')
# The semispace gc uses raw_malloc for its internal data structs
@@ -555,13 +555,13 @@
def f():
counter = 0
for n in range(32):
- buf, is_pinned, is_raw = get_nonmovingbuffer(d)
+ buf, flag = get_nonmovingbuffer(d)
try:
for i in range(len(d)):
if buf[i] == d[i]:
counter += 1
finally:
- free_nonmovingbuffer(d, buf, is_pinned, is_raw)
+ free_nonmovingbuffer(d, buf, flag)
return counter
fn = self.compile(f, [], gcpolicy='incminimark')
# The incminimark gc uses raw_malloc for its internal data structs
@@ -835,3 +835,11 @@
if hasattr(rffi, '__INT128_T'):
value = 0xAAAABBBBCCCCDDDD
assert cast(rffi.__INT128_T, r_uint64(value)) == value
+
+def test_scoped_view_charp():
+ s = 'bar'
+ with scoped_view_charp(s) as buf:
+ assert buf[0] == 'b'
+ assert buf[1] == 'a'
+ assert buf[2] == 'r'
+ assert buf[3] == '\x00'
diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py
--- a/rpython/translator/c/node.py
+++ b/rpython/translator/c/node.py
@@ -253,8 +253,11 @@
yield '\t' + cdecl(typename, fname) + ';'
if not self.ARRAY._hints.get('nolength', False):
yield '\tlong length;'
+ varlength = self.varlength
+ if varlength is not None:
+ varlength += self.ARRAY._hints.get('extra_item_after_alloc', 0)
line = '%s;' % cdecl(self.itemtypename,
- 'items[%s]' % deflength(self.varlength))
+ 'items[%s]' % deflength(varlength))
if self.ARRAY.OF is Void: # strange
line = '/* array of void */'
if self.ARRAY._hints.get('nolength', False):
diff --git a/rpython/translator/c/src/precommondefs.h b/rpython/translator/c/src/precommondefs.h
--- a/rpython/translator/c/src/precommondefs.h
+++ b/rpython/translator/c/src/precommondefs.h
@@ -18,9 +18,9 @@
#define _LARGEFILE_SOURCE 1
/* Define on NetBSD to activate all library features */
#define _NETBSD_SOURCE 1
-/* Define to activate features from IEEE Stds 1003.1-2001 */
+/* Define to activate features from IEEE Stds 1003.1-2008 */
#ifndef _POSIX_C_SOURCE
-# define _POSIX_C_SOURCE 200112L
+# define _POSIX_C_SOURCE 200809L
#endif
/* Define on FreeBSD to activate all library features */
#define __BSD_VISIBLE 1
diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py
--- a/rpython/translator/c/test/test_lltyped.py
+++ b/rpython/translator/c/test/test_lltyped.py
@@ -1,4 +1,4 @@
-import py
+import py, random
from rpython.rtyper.lltypesystem.lltype import *
from rpython.rtyper.lltypesystem import rffi
from rpython.translator.c.test.test_genc import compile
@@ -255,28 +255,6 @@
res2 = fn(0)
assert res1 == res2
- def test_null_padding(self):
- py.test.skip("we no longer pad our RPython strings with a final NUL")
- from rpython.rtyper.lltypesystem import llmemory
- from rpython.rtyper.lltypesystem import rstr
- chars_offset = llmemory.FieldOffset(rstr.STR, 'chars') + \
- llmemory.ArrayItemsOffset(rstr.STR.chars)
- # sadly, there's no way of forcing this to fail if the strings
- # are allocated in a region of memory such that they just
- # happen to get a NUL byte anyway :/ (a debug build will
- # always fail though)
- def trailing_byte(s):
- adr_s = llmemory.cast_ptr_to_adr(s)
- return (adr_s + chars_offset).char[len(s)]
- def f(x):
- r = 0
- for i in range(x):
- r += ord(trailing_byte(' '*(100-x*x)))
- return r
- fn = self.getcompiled(f, [int])
- res = fn(10)
- assert res == 0
-
def test_cast_primitive(self):
def f(x):
x = cast_primitive(UnsignedLongLong, x)
@@ -1023,3 +1001,49 @@
assert fn(r_longlong(1)) == True
assert fn(r_longlong(256)) == True
assert fn(r_longlong(2**32)) == True
+
+ def test_extra_item_after_alloc(self):
+ from rpython.rlib import rgc
+ from rpython.rtyper.lltypesystem import lltype
+ from rpython.rtyper.lltypesystem import rstr
+ # all STR objects should be allocated with enough space for one
+ # extra char. Check this for prebuilt strings, and for dynamically
+ # allocated ones with the default GC for tests. Use strings of 8,
+ # 16 and 24 chars because if the extra char is missing, writing to it
+ # is likely to cause corruption in nearby structures.
+ sizes = [random.choice([8, 16, 24]) for i in range(100)]
+ A = lltype.Struct('A', ('x', lltype.Signed))
+ prebuilt = [(rstr.mallocstr(sz),
+ lltype.malloc(A, flavor='raw', immortal=True))
+ for sz in sizes]
+ k = 0
+ for i, (s, a) in enumerate(prebuilt):
+ a.x = i
+ for i in range(len(s.chars)):
+ k += 1
+ if k == 256:
+ k = 1
+ s.chars[i] = chr(k)
+
+ def check(lst):
+ hashes = []
+ for i, (s, a) in enumerate(lst):
+ assert a.x == i
+ rgc.ll_write_final_null_char(s)
+ for i, (s, a) in enumerate(lst):
+ assert a.x == i # check it was not overwritten
+ def f():
+ check(prebuilt)
+ lst1 = []
+ for i, sz in enumerate(sizes):
+ s = rstr.mallocstr(sz)
+ a = lltype.malloc(A, flavor='raw')
+ a.x = i
+ lst1.append((s, a))
+ check(lst1)
+ for _, a in lst1:
+ lltype.free(a, flavor='raw')
+ return 42
+
+ fn = self.getcompiled(f, [])
+ assert fn() == 42
diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py
--- a/rpython/translator/c/test/test_newgc.py
+++ b/rpython/translator/c/test/test_newgc.py
@@ -3,6 +3,7 @@
import os
import sys
import subprocess
+import random
import py
@@ -1468,6 +1469,52 @@
res = self.run('nursery_hash_base')
assert res >= 195
+ def define_extra_item_after_alloc(cls):
+ from rpython.rtyper.lltypesystem import rstr
+ # all STR objects should be allocated with enough space for
+ # one extra char. Check this with our GCs. Use strings of 8,
+ # 16 and 24 chars because if the extra char is missing,
+ # writing to it is likely to cause corruption in nearby
+ # structures.
+ sizes = [random.choice([8, 16, 24]) for i in range(100)]
+ A = lltype.Struct('A', ('x', lltype.Signed))
+ prebuilt = [(rstr.mallocstr(sz),
+ lltype.malloc(A, flavor='raw', immortal=True))
+ for sz in sizes]
+ k = 0
+ for i, (s, a) in enumerate(prebuilt):
+ a.x = i
+ for i in range(len(s.chars)):
+ k += 1
+ if k == 256:
+ k = 1
+ s.chars[i] = chr(k)
+
+ def check(lst):
+ hashes = []
+ for i, (s, a) in enumerate(lst):
+ assert a.x == i
+ rgc.ll_write_final_null_char(s)
+ for i, (s, a) in enumerate(lst):
+ assert a.x == i # check it was not overwritten
+ def fn():
+ check(prebuilt)
+ lst1 = []
+ for i, sz in enumerate(sizes):
+ s = rstr.mallocstr(sz)
+ a = lltype.malloc(A, flavor='raw')
+ a.x = i
+ lst1.append((s, a))
+ check(lst1)
+ for _, a in lst1:
+ lltype.free(a, flavor='raw')
+ return 42
+ return fn
+
+ def test_extra_item_after_alloc(self):
+ res = self.run('extra_item_after_alloc')
+ assert res == 42
+
class TestGenerationalGC(TestSemiSpaceGC):
gcpolicy = "generation"
diff --git a/rpython/translator/tool/test/test_staticsizereport.py b/rpython/translator/tool/test/test_staticsizereport.py
--- a/rpython/translator/tool/test/test_staticsizereport.py
+++ b/rpython/translator/tool/test/test_staticsizereport.py
@@ -67,7 +67,7 @@
(4 * S + 2 * P) + # struct dicttable
(S + 2 * 8192) + # indexes, length 8192, rffi.USHORT
(S + (S + S) * 3840) + # entries, length 3840
- (S + S + 5) * 3840) # 3840 strings with 5 chars each
+ (S + S + 6) * 3840) # 3840 strings with 5 chars each (+1 final)
assert guess_size(func.builder.db, fixarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(lltype.Signed)
assert guess_size(func.builder.db, dynarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 2 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(rffi.VOIDP)
From pypy.commits at gmail.com Wed Aug 3 08:00:48 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Wed, 03 Aug 2016 05:00:48 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: enforce that vectorization is
 successful in the pypy_c tests (micronumpy)
Message-ID: <57a1dcf0.919a1c0a.74348.6672@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86002:57316f5af6ff
Date: 2016-08-03 14:00 +0200
http://bitbucket.org/pypy/pypy/changeset/57316f5af6ff/
Log:	enforce that vectorization is successful in the pypy_c tests
	(micronumpy)
diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
--- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
@@ -51,6 +51,10 @@
log = self.run(main, [], vec=0)
assert log.result == vlog.result
assert log.result == result
+ assert log.jit_summary.vecopt_tried == 0
+ assert log.jit_summary.vecopt_success == 0
+ assert vlog.jit_summary.vecopt_tried > 0
+ assert vlog.jit_summary.vecopt_success > 0
arith_comb = [
@@ -88,6 +92,10 @@
log = self.run(main, [], vec=0)
assert log.result == vlog.result
assert log.result == result
+ assert log.jit_summary.vecopt_tried == 0
+ assert log.jit_summary.vecopt_success == 0
+ assert vlog.jit_summary.vecopt_tried > 0
+ assert vlog.jit_summary.vecopt_success > 0
def test_reduce_logical_xor(self):
def main():
diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
--- a/rpython/jit/metainterp/optimizeopt/vector.py
+++ b/rpython/jit/metainterp/optimizeopt/vector.py
@@ -148,7 +148,7 @@
return info, loop.finaloplist(jitcell_token=jitcell_token, reset_label_token=False)
except NotAVectorizeableLoop as e:
debug_stop("vec-opt-loop")
- debug_print("failed to vectorize loop. reason: %s" % str(e))
+ debug_print("failed to vectorize loop. reason: %s" % e.msg)
# vectorization is not possible
return loop_info, version.loop.finaloplist()
except NotAProfitableLoop:
From pypy.commits at gmail.com Wed Aug 3 08:44:38 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Wed, 03 Aug 2016 05:44:38 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: initializing the vector ext
just after the cpu
Message-ID: <57a1e736.ca11c30a.76936.a97b@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86003:768f96ea1752
Date: 2016-08-03 14:43 +0200
http://bitbucket.org/pypy/pypy/changeset/768f96ea1752/
Log: initializing the vector ext just after the cpu
diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py
--- a/rpython/jit/backend/ppc/runner.py
+++ b/rpython/jit/backend/ppc/runner.py
@@ -8,7 +8,6 @@
from rpython.jit.backend.ppc.arch import WORD
from rpython.jit.backend.ppc.codebuilder import PPCBuilder
from rpython.jit.backend.ppc import register as r
-from rpython.jit.backend.ppc.detect_feature import detect_vsx
class PPC_CPU(AbstractLLCPU):
diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py
--- a/rpython/jit/metainterp/optimizeopt/__init__.py
+++ b/rpython/jit/metainterp/optimizeopt/__init__.py
@@ -47,10 +47,6 @@
or 'heap' not in enable_opts or 'pure' not in enable_opts):
optimizations.append(OptSimplify(unroll))
- cpu = metainterp_sd.cpu
- if not cpu.vector_ext.is_setup():
- cpu.vector_ext.setup_once(cpu.assembler)
-
return optimizations, unroll
def optimize_trace(metainterp_sd, jitdriver_sd, compile_data, memo=None):
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -1857,6 +1857,7 @@
self.jitlog.setup_once()
debug_print(self.jit_starting_line)
self.cpu.setup_once()
+ self.cpu.vector_ext.setup_once(self.cpu.assembler)
if not self.profiler.initialized:
self.profiler.start()
self.profiler.initialized = True
From pypy.commits at gmail.com Wed Aug 3 09:39:31 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 03 Aug 2016 06:39:31 -0700 (PDT)
Subject: [pypy-commit] pypy default: Work around a MSVC bug
Message-ID: <57a1f413.041f1c0a.8b27f.765b@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86004:1512e98263d9
Date: 2016-08-03 15:41 +0200
http://bitbucket.org/pypy/pypy/changeset/1512e98263d9/
Log: Work around a MSVC bug
diff --git a/rpython/translator/c/src/float.h b/rpython/translator/c/src/float.h
--- a/rpython/translator/c/src/float.h
+++ b/rpython/translator/c/src/float.h
@@ -34,10 +34,39 @@
#define OP_CAST_FLOAT_TO_UINT(x,r) r = (Unsigned)(x)
#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x)
#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x)
-#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x)
-#define OP_CAST_ULONGLONG_TO_FLOAT(x,r) r = (double)(x)
+#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = rpy_cast_longlong_to_float(x)
+#define OP_CAST_ULONGLONG_TO_FLOAT(x,r) r = rpy_cast_ulonglong_to_float(x)
#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x)
+#ifdef _WIN32
+/* The purpose of these two functions is to work around a MSVC bug.
+ The expression '(double)131146795334735160LL' will lead to bogus
+ rounding, but apparently everything is fine if we write instead
+ rpy_cast_longlong_to_float(131146795334735160LL). Tested with
+ MSVC 2008. Note that even if the two functions contain just
+ 'return (double)x;' it seems to work on MSVC 2008, but I don't
+ trust that there are no other corner cases.
+ http://stackoverflow.com/questions/33829101/incorrect-double-to-long-conversion
+*/
+static _inline double rpy_cast_longlong_to_float(long long x)
+{
+ unsigned int lo = (unsigned int)x;
+ double result = lo;
+ result += ((int)(x >> 32)) * 4294967296.0;
+ return result;
+}
+static _inline double rpy_cast_ulonglong_to_float(unsigned long long x)
+{
+ unsigned int lo = (unsigned int)x;
+ double result = lo;
+ result += ((unsigned int)(x >> 32)) * 4294967296.0;
+ return result;
+}
+#else
+# define rpy_cast_longlong_to_float(x) ((double)(x))
+# define rpy_cast_ulonglong_to_float(x) ((double)(x))
+#endif
+
#ifdef HAVE_LONG_LONG
#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x)
#define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x)
From pypy.commits at gmail.com Wed Aug 3 12:21:57 2016
From: pypy.commits at gmail.com (rlamy)
Date: Wed, 03 Aug 2016 09:21:57 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Merge obsoleted impl of
W_DictProxyObject into the correct one: fix __new__, __init__ and __repr__
Message-ID: <57a21a25.6814c30a.71dcd.faad@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86005:fcb110da6372
Date: 2016-08-03 17:21 +0100
http://bitbucket.org/pypy/pypy/changeset/fcb110da6372/
Log: Merge obsoleted impl of W_DictProxyObject into the correct one: fix
__new__, __init__ and __repr__
diff --git a/pypy/objspace/std/classdict.py b/pypy/objspace/std/classdict.py
--- a/pypy/objspace/std/classdict.py
+++ b/pypy/objspace/std/classdict.py
@@ -9,35 +9,6 @@
from pypy.objspace.std.typeobject import unwrap_cell
-class W_DictProxyObject(W_DictObject):
- @staticmethod
- def descr_new(space, w_type, w_mapping):
- if (not space.lookup(w_mapping, "__getitem__") or
- space.isinstance_w(w_mapping, space.w_list) or
- space.isinstance_w(w_mapping, space.w_tuple)):
- raise oefmt(space.w_TypeError,
- "mappingproxy() argument must be a mapping, not %T", w_mapping)
- strategy = space.fromcache(MappingProxyStrategy)
- storage = strategy.erase(w_mapping)
- w_obj = space.allocate_instance(W_DictProxyObject, w_type)
- W_DictProxyObject.__init__(w_obj, space, strategy, storage)
- return w_obj
-
- def descr_init(self, space, __args__):
- pass
-
- def descr_repr(self, space):
- return space.wrap(u"mappingproxy(%s)" % (
- space.unicode_w(W_DictObject.descr_repr(self, space))))
-
-W_DictProxyObject.typedef = TypeDef(
- "mappingproxy", W_DictObject.typedef,
- __new__ = interp2app(W_DictProxyObject.descr_new),
- __init__ = interp2app(W_DictProxyObject.descr_init),
- __repr__ = interp2app(W_DictProxyObject.descr_repr),
-)
-
-
class ClassDictStrategy(DictStrategy):
"""Exposes a W_TypeObject.dict_w at app-level.
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -2,6 +2,7 @@
# type.__dict__, so PyDictProxy_New has to use a custom read-only mapping.
from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.error import oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
from pypy.interpreter.typedef import TypeDef, interp2app
@@ -11,6 +12,18 @@
def __init__(self, w_mapping):
self.w_mapping = w_mapping
+ @staticmethod
+ def descr_new(space, w_type, w_mapping):
+ if (not space.lookup(w_mapping, "__getitem__") or
+ space.isinstance_w(w_mapping, space.w_list) or
+ space.isinstance_w(w_mapping, space.w_tuple)):
+ raise oefmt(space.w_TypeError,
+ "mappingproxy() argument must be a mapping, not %T", w_mapping)
+ return W_DictProxyObject(w_mapping)
+
+ def descr_init(self, space, __args__):
+ pass
+
def descr_len(self, space):
return space.len(self.w_mapping)
@@ -27,7 +40,8 @@
return space.str(self.w_mapping)
def descr_repr(self, space):
- return space.repr(self.w_mapping)
+ return space.newunicode(u"mappingproxy(%s)" %
+ (space.unicode_w(space.repr(self.w_mapping)),))
@unwrap_spec(w_default=WrappedDefault(None))
def get_w(self, space, w_key, w_default):
@@ -47,6 +61,8 @@
W_DictProxyObject.typedef = TypeDef(
'mappingproxy',
+ __new__=interp2app(W_DictProxyObject.descr_new),
+ __init__=interp2app(W_DictProxyObject.descr_init),
__len__=interp2app(W_DictProxyObject.descr_len),
__getitem__=interp2app(W_DictProxyObject.descr_getitem),
__contains__=interp2app(W_DictProxyObject.descr_contains),
From pypy.commits at gmail.com Wed Aug 3 13:47:32 2016
From: pypy.commits at gmail.com (rlamy)
Date: Wed, 03 Aug 2016 10:47:32 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Implement comparison methods for
mappingproxy
Message-ID: <57a22e34.109a1c0a.ee027.40d3@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86006:361a24241f59
Date: 2016-08-03 18:47 +0100
http://bitbucket.org/pypy/pypy/changeset/361a24241f59/
Log: Implement comparison methods for mappingproxy
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -59,6 +59,19 @@
def copy_w(self, space):
return space.call_method(self.w_mapping, "copy")
+cmp_methods = {}
+def make_cmp_method(op):
+ def descr_op(self, space, w_other):
+ return getattr(space, op)(self.w_mapping, w_other)
+ descr_name = 'descr_' + op
+ descr_op.__name__ = descr_name
+ setattr(W_DictProxyObject, descr_name, descr_op)
+ cmp_methods['__%s__' % op] = interp2app(getattr(W_DictProxyObject, descr_name))
+
+for op in ['eq', 'ne', 'gt', 'ge', 'lt', 'le']:
+ make_cmp_method(op)
+
+
W_DictProxyObject.typedef = TypeDef(
'mappingproxy',
__new__=interp2app(W_DictProxyObject.descr_new),
@@ -73,5 +86,6 @@
keys=interp2app(W_DictProxyObject.keys_w),
values=interp2app(W_DictProxyObject.values_w),
items=interp2app(W_DictProxyObject.items_w),
- copy=interp2app(W_DictProxyObject.copy_w)
+ copy=interp2app(W_DictProxyObject.copy_w),
+ **cmp_methods
)
From pypy.commits at gmail.com Wed Aug 3 15:42:38 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Wed, 03 Aug 2016 12:42:38 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Fix pyparser (now allows async and
await as variable names)
Message-ID: <57a2492e.56421c0a.75916.57be@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86007:4ae7acb78ae8
Date: 2016-08-03 21:41 +0200
http://bitbucket.org/pypy/pypy/changeset/4ae7acb78ae8/
Log: Fix pyparser (now allows async and await as variable names)
diff --git a/pypy/interpreter/pyparser/pytokenizer.py b/pypy/interpreter/pyparser/pytokenizer.py
--- a/pypy/interpreter/pyparser/pytokenizer.py
+++ b/pypy/interpreter/pyparser/pytokenizer.py
@@ -96,7 +96,9 @@
altindents = [0]
last_comment = ''
parenlevstart = (0, 0, "")
- last_token = ''
+ async_def = False
+ async_def_nl = False
+ async_def_indent = 0
# make the annotator happy
endDFA = DUMMY_DFA
@@ -182,6 +184,10 @@
raise TokenIndentationError(err, line, lnum, 0, token_list)
if altcolumn != altindents[-1]:
raise TabError(lnum, pos, line)
+ if async_def_nl and async_def_indent >= indents[-1]:
+ async_def = False
+ async_def_nl = False
+ async_def_indent = 0
else: # continued statement
if not line:
@@ -215,6 +221,8 @@
last_comment = ''
elif initial in '\r\n':
if parenlev <= 0:
+ if async_def:
+ async_def_nl = True
tok = (tokens.NEWLINE, last_comment, lnum, start, line)
token_list.append(tok)
last_comment = ''
@@ -254,10 +262,36 @@
if not verify_identifier(token):
raise TokenError("invalid character in identifier",
line, lnum, start + 1, token_list)
- if token == 'async' and not last_token == 'def':
- token_list.append((tokens.ASYNC, token, lnum, start, line))
- elif token == 'await' and not last_token == 'def':
- token_list.append((tokens.AWAIT, token, lnum, start, line))
+
+ if async_def: # inside 'async def' function
+ if token == 'async':
+ token_list.append((tokens.ASYNC, token, lnum, start, line))
+ elif token == 'await':
+ token_list.append((tokens.AWAIT, token, lnum, start, line))
+ else:
+ token_list.append((tokens.NAME, token, lnum, start, line))
+ elif token == 'async': # async token, look ahead
+ #ahead token
+ if pos < max:
+ as_pseudomatch = pseudoDFA.recognize(line, pos)
+ as_start = whiteSpaceDFA.recognize(line, pos)
+ if as_start < 0:
+ as_start = pos
+ as_end = as_pseudomatch
+
+ if as_start == as_end:
+ raise TokenError("Unknown character", line,
+ lnum, as_start + 1, token_list)
+
+ ahead_token = line[as_start:as_end]
+ if ahead_token == 'def':
+ async_def = True
+ async_def_indent = indents[-1]
+ token_list.append((tokens.ASYNC, token, lnum, start, line))
+ else:
+ token_list.append((tokens.NAME, token, lnum, start, line))
+ else:
+ token_list.append((tokens.NAME, token, lnum, start, line))
else:
token_list.append((tokens.NAME, token, lnum, start, line))
last_comment = ''
@@ -279,7 +313,6 @@
punct = tokens.OP
token_list.append((punct, token, lnum, start, line))
last_comment = ''
- last_token = token
else:
start = whiteSpaceDFA.recognize(line, pos)
if start < 0:
diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py
--- a/pypy/interpreter/pyparser/test/test_pyparse.py
+++ b/pypy/interpreter/pyparser/test/test_pyparse.py
@@ -175,9 +175,13 @@
self.parse("await = 1")
self.parse("def async(): pass")
#async for
- self.parse("async def foo(): async for a in b: pass")
+ self.parse("""async def foo():
+ async for a in b:
+ pass""")
#async with
- self.parse("async def foo(): async with a: pass")
+ self.parse("""async def foo():
+ async with a:
+ pass""")
class TestPythonParserWithSpace:
From pypy.commits at gmail.com Wed Aug 3 15:51:17 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Wed, 03 Aug 2016 12:51:17 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: More parser tests for async and
await
Message-ID: <57a24b35.0675c20a.d6ef0.3c63@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86008:3f09fda522df
Date: 2016-08-03 21:50 +0200
http://bitbucket.org/pypy/pypy/changeset/3f09fda522df/
Log: More parser tests for async and await
diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py
--- a/pypy/interpreter/pyparser/test/test_pyparse.py
+++ b/pypy/interpreter/pyparser/test/test_pyparse.py
@@ -170,6 +170,7 @@
def test_async_await(self):
self.parse("async def coro(): await func")
+ py.test.raises(SyntaxError, self.parse, 'await x')
#Test as var and func name
self.parse("async = 1")
self.parse("await = 1")
@@ -178,10 +179,14 @@
self.parse("""async def foo():
async for a in b:
pass""")
+ py.test.raises(SyntaxError, self.parse, 'def foo(): async for a in b: pass')
#async with
self.parse("""async def foo():
async with a:
pass""")
+ py.test.raises(SyntaxError, self.parse, 'def foo(): async with a: pass')
+
+
class TestPythonParserWithSpace:
diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py
--- a/pypy/interpreter/test/test_syntax.py
+++ b/pypy/interpreter/test/test_syntax.py
@@ -100,12 +100,6 @@
async def foo():
await await fut
- await x
-
- def foo(): async for a in b: pass
-
- def foo(): async with a: pass
-
""")
From pypy.commits at gmail.com Wed Aug 3 16:02:24 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 03 Aug 2016 13:02:24 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Record return from functions
as events too
Message-ID: <57a24dd0.08d11c0a.daf7e.02f8@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86009:84d843464c68
Date: 2016-08-03 22:04 +0200
http://bitbucket.org/pypy/pypy/changeset/84d843464c68/
Log: Record return from functions as events too
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -87,9 +87,9 @@
# be accessed also later
frame_vref()
jit.virtual_ref_finish(frame_vref, frame)
- if self.space.config.translation.reverse_debugger:
- from pypy.interpreter.reverse_debugging import leave_call
- leave_call(self.topframeref(), frame)
+ if self.space.config.translation.reverse_debugger:
+ from pypy.interpreter.reverse_debugging import leave_call
+ leave_call(self.topframeref(), frame)
# ________________________________________________________________
diff --git a/pypy/interpreter/reverse_debugging.py b/pypy/interpreter/reverse_debugging.py
--- a/pypy/interpreter/reverse_debugging.py
+++ b/pypy/interpreter/reverse_debugging.py
@@ -78,12 +78,14 @@
if dbstate.breakpoint_stack_id != 0 and caller_frame is not None:
if dbstate.breakpoint_stack_id == revdb.get_unique_id(caller_frame):
revdb.breakpoint(-2)
+ if we_are_translated():
+ stop_point_activate(-2)
def jump_backward(frame, jumpto):
# When we see a jump backward, we set 'f_revdb_nextline_instr' in
# such a way that the next instruction, at 'jumpto', will trigger
- # stop_point_at_start_of_line(). We have to trigger it even if
+ # stop_point_activate(). We have to trigger it even if
# 'jumpto' is not actually a start of line. For example, after a
# 'while foo:', the body ends with a JUMP_ABSOLUTE which
# jumps back to the *second* opcode of the while.
@@ -117,12 +119,12 @@
if ch == 0:
pass # we are at the start of a line now
else:
- # We are not, so don't call stop_point_at_start_of_line().
+ # We are not, so don't call stop_point_activate().
# We still have to fill f_revdb_nextline_instr.
call_stop_point_at_line = False
#
if call_stop_point_at_line:
- stop_point_at_start_of_line()
+ stop_point_activate()
cur += 1
ch = ord(co_revdb_linestarts[cur])
#
@@ -199,7 +201,7 @@
non_standard_code = NonStandardCode()
-def stop_point_at_start_of_line():
+def stop_point_activate(place=0):
if revdb.watch_save_state():
any_watch_point = False
space = dbstate.space
@@ -219,7 +221,7 @@
revdb.watch_restore_state(any_watch_point)
if watch_id != -1:
revdb.breakpoint(watch_id)
- revdb.stop_point()
+ revdb.stop_point(place)
def load_metavar(index):
@@ -382,7 +384,8 @@
indent))
revdb.send_linecache(frame.getcode().co_filename, lineno)
-def display_function_part(frame, max_lines_before, max_lines_after):
+def display_function_part(frame, max_lines_before, max_lines_after,
+ prompt="> "):
code = frame.getcode()
if code.co_filename.startswith(''):
return
@@ -400,7 +403,7 @@
#
for i in range(first_lineno, final_lineno + 1):
if i == current_lineno:
- revdb.send_output("> ")
+ revdb.send_output(prompt)
else:
revdb.send_output(" ")
revdb.send_linecache(code.co_filename, i, strip=False)
@@ -415,7 +418,12 @@
if cmd.c_arg1 == 0:
revdb.send_output("%s:\n" % (
file_and_lineno(frame, frame.get_last_lineno()),))
- display_function_part(frame, max_lines_before=8, max_lines_after=5)
+ if revdb.current_place() == -2:
+ prompt = "<<"
+ else:
+ prompt = "> "
+ display_function_part(frame, max_lines_before=8, max_lines_after=5,
+ prompt=prompt)
elif cmd.c_arg1 == 2:
display_function_part(frame, max_lines_before=1000,max_lines_after=1000)
else:
diff --git a/rpython/rlib/revdb.py b/rpython/rlib/revdb.py
--- a/rpython/rlib/revdb.py
+++ b/rpython/rlib/revdb.py
@@ -26,14 +26,14 @@
ANSWER_WATCH = 23
-def stop_point():
+def stop_point(place=0):
"""Indicates a point in the execution of the RPython program where
the reverse-debugger can stop. When reverse-debugging, we see
the "time" as the index of the stop-point that happened.
"""
if we_are_translated():
if fetch_translated_config().translation.reverse_debugger:
- llop.revdb_stop_point(lltype.Void)
+ llop.revdb_stop_point(lltype.Void, place)
def register_debug_command(command, lambda_func):
"""Register the extra RPython-implemented debug command."""
@@ -75,6 +75,12 @@
unique id greater or equal."""
return llop.revdb_get_value(lltype.SignedLongLong, 'u')
+def current_place():
+ """For RPython debug commands: the value of the 'place' argument
+ passed to stop_point().
+ """
+ return llop.revdb_get_value(lltype.Signed, 'p')
+
## @specialize.arg(1)
## def go_forward(time_delta, callback):
## """For RPython debug commands: tells that after this function finishes,
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -145,14 +145,17 @@
static void record_stop_point(void);
static void replay_stop_point(void);
+static long current_place;
RPY_EXTERN
-void rpy_reverse_db_stop_point(void)
+void rpy_reverse_db_stop_point(long place)
{
if (!RPY_RDB_REPLAY)
record_stop_point();
- else
+ else {
+ current_place = place;
replay_stop_point();
+ }
}
@@ -1244,6 +1247,8 @@
return (flag_io_disabled == FID_REGULAR_MODE ?
rpy_revdb.unique_id_seen :
saved_state.unique_id_seen);
+ case 'p': /* current_place() */
+ return current_place;
default:
return -1;
}
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -129,9 +129,9 @@
_RPY_REVDB_PRUID(); \
} while (0)
-#define OP_REVDB_STOP_POINT(r) \
+#define OP_REVDB_STOP_POINT(place, r) \
if (++rpy_revdb.stop_point_seen == rpy_revdb.stop_point_break) \
- rpy_reverse_db_stop_point()
+ rpy_reverse_db_stop_point(place)
#define OP_REVDB_SEND_ANSWER(cmd, arg1, arg2, arg3, ll_string, r) \
rpy_reverse_db_send_answer(cmd, arg1, arg2, arg3, ll_string)
@@ -176,7 +176,7 @@
RPY_EXTERN void rpy_reverse_db_flush(void);
RPY_EXTERN void rpy_reverse_db_fetch(const char *file, int line);
-RPY_EXTERN void rpy_reverse_db_stop_point(void);
+RPY_EXTERN void rpy_reverse_db_stop_point(long place);
RPY_EXTERN void rpy_reverse_db_send_answer(int cmd, int64_t arg1, int64_t arg2,
int64_t arg3, RPyString *extra);
RPY_EXTERN Signed rpy_reverse_db_identityhash(struct pypy_header0 *obj);
diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py
--- a/rpython/translator/revdb/test/test_basic.py
+++ b/rpython/translator/revdb/test/test_basic.py
@@ -341,6 +341,8 @@
if extra == 'get-value':
revdb.send_answer(100, revdb.current_time(),
revdb.total_time())
+ if extra == 'current-place':
+ revdb.send_answer(200, revdb.current_place())
## if extra == 'go-fw':
## revdb.go_forward(1, went_fw)
## if cmdline == 'set-break-after-0':
@@ -375,7 +377,7 @@
for i, op in enumerate(argv[1:]):
dbstate.stuff = Stuff()
dbstate.stuff.x = i + 1000
- revdb.stop_point()
+ revdb.stop_point(i * 10)
print op
if i == 1:
if os.fork() == 0: # child
@@ -419,6 +421,18 @@
child.send(Message(1, extra='get-value'))
child.expect(100, 1, 3)
+ def test_current_place(self):
+ child = self.replay()
+ child.send(Message(1, extra='current-place'))
+ child.expect(200, 0)
+ child.expect(42, 1, -43, -44, 'current-place')
+ child.expect(ANSWER_READY, 1, Ellipsis)
+ child.send(Message(CMD_FORWARD, 2))
+ child.expect(ANSWER_READY, 3, Ellipsis)
+ child.send(Message(1, extra='current-place'))
+ child.expect(200, 20)
+ child.expect(42, 1, -43, -44, 'current-place')
+
## def test_go_fw(self):
## child = self.replay()
## child.send(Message(1, extra='go-fw'))
From pypy.commits at gmail.com Wed Aug 3 17:23:47 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 03 Aug 2016 14:23:47 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Add __pypy__.revdb_stop(),
which makes an explicit breakpoint in revdb.
Message-ID: <57a260e3.a427c20a.88889.6999@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86010:88575763814a
Date: 2016-08-03 23:25 +0200
http://bitbucket.org/pypy/pypy/changeset/88575763814a/
Log: Add __pypy__.revdb_stop(), which makes an explicit breakpoint in
revdb. Use it in app_main to stop just after running the program
(including if there was an exception).
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -79,10 +79,16 @@
sys.stdout if needed, etc.
"""
try:
+ from __pypy__ import revdb_stop
+ except ImportError:
+ revdb_stop = None
+ try:
# run it
try:
f(*fargs, **fkwds)
finally:
+ if revdb_stop:
+ revdb_stop()
sys.settrace(None)
sys.setprofile(None)
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -89,7 +89,7 @@
jit.virtual_ref_finish(frame_vref, frame)
if self.space.config.translation.reverse_debugger:
from pypy.interpreter.reverse_debugging import leave_call
- leave_call(self.topframeref(), frame)
+ leave_call(self.topframeref(), got_exception)
# ________________________________________________________________
diff --git a/pypy/interpreter/reverse_debugging.py b/pypy/interpreter/reverse_debugging.py
--- a/pypy/interpreter/reverse_debugging.py
+++ b/pypy/interpreter/reverse_debugging.py
@@ -74,12 +74,16 @@
if code.co_revdb_linestarts is None:
build_co_revdb_linestarts(code)
-def leave_call(caller_frame, callee_frame):
+def leave_call(caller_frame, got_exception):
if dbstate.breakpoint_stack_id != 0 and caller_frame is not None:
if dbstate.breakpoint_stack_id == revdb.get_unique_id(caller_frame):
revdb.breakpoint(-2)
if we_are_translated():
- stop_point_activate(-2)
+ stop_point_activate(-2 + got_exception)
+
+def stop_point():
+ if we_are_translated():
+ revdb.breakpoint(-3)
def jump_backward(frame, jumpto):
@@ -418,10 +422,12 @@
if cmd.c_arg1 == 0:
revdb.send_output("%s:\n" % (
file_and_lineno(frame, frame.get_last_lineno()),))
- if revdb.current_place() == -2:
- prompt = "<<"
+ if revdb.current_place() == -2: # <= this is the arg to stop_point()
+ prompt = "<<" # return
+ elif revdb.current_place() == -1:
+ prompt = "!!" # exceptional return
else:
- prompt = "> "
+ prompt = "> " # plain line
display_function_part(frame, max_lines_before=8, max_lines_after=5,
prompt=prompt)
elif cmd.c_arg1 == 2:
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -129,3 +129,6 @@
features = detect_cpu.getcpufeatures(model)
self.extra_interpdef('jit_backend_features',
'space.wrap(%r)' % features)
+ if self.space.config.translation.reverse_debugger:
+ self.extra_interpdef('revdb_stop',
+ 'interp_magic.revdb_stop')
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -204,3 +204,7 @@
after changing the Python code.
"""
return space.wrap(space._side_effects_ok())
+
+def revdb_stop(space):
+ from pypy.interpreter.reverse_debugging import stop_point
+ stop_point()
diff --git a/rpython/translator/revdb/interact.py b/rpython/translator/revdb/interact.py
--- a/rpython/translator/revdb/interact.py
+++ b/rpython/translator/revdb/interact.py
@@ -134,6 +134,9 @@
elif break_at[0] == 'W':
kind = 'watchpoint'
name = self.pgroup.all_breakpoints.sources.get(num, '??')
+ elif num == -3:
+ kind = 'stoppoint'
+ name = 'explicit stop'
else:
kind = '?????point'
name = repr(break_at)
@@ -191,9 +194,11 @@
printing = []
for num in b.regular_breakpoint_nums():
kind, name = self._bp_kind(num)
- printing.append('%s %s %d: %s' % (
+ printing.append('%s %s%s: %s' % (
'Reverse-hit' if backward else 'Hit',
- kind, num, name))
+ kind,
+ '' if kind == 'stoppoint' else ' %d' % (num,),
+ name))
self.print_extra_pending_info = '\n'.join(printing)
if self.pgroup.get_current_time() != b.time:
target_time = b.time
From pypy.commits at gmail.com Thu Aug 4 06:45:49 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Thu, 04 Aug 2016 03:45:49 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: merge default
Message-ID: <57a31cdd.2624c20a.7a5a1.499c@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86012:bd6d62fca9de
Date: 2016-08-04 12:44 +0200
http://bitbucket.org/pypy/pypy/changeset/bd6d62fca9de/
Log: merge default
diff --git a/rpython/translator/c/src/float.h b/rpython/translator/c/src/float.h
--- a/rpython/translator/c/src/float.h
+++ b/rpython/translator/c/src/float.h
@@ -34,10 +34,39 @@
#define OP_CAST_FLOAT_TO_UINT(x,r) r = (Unsigned)(x)
#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x)
#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x)
-#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x)
-#define OP_CAST_ULONGLONG_TO_FLOAT(x,r) r = (double)(x)
+#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = rpy_cast_longlong_to_float(x)
+#define OP_CAST_ULONGLONG_TO_FLOAT(x,r) r = rpy_cast_ulonglong_to_float(x)
#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x)
+#ifdef _WIN32
+/* The purpose of these two functions is to work around a MSVC bug.
+ The expression '(double)131146795334735160LL' will lead to bogus
+ rounding, but apparently everything is fine if we write instead
+ rpy_cast_longlong_to_float(131146795334735160LL). Tested with
+ MSVC 2008. Note that even if the two functions contain just
+ 'return (double)x;' it seems to work on MSVC 2008, but I don't
+ trust that there are no other corner cases.
+ http://stackoverflow.com/questions/33829101/incorrect-double-to-long-conversion
+*/
+static _inline double rpy_cast_longlong_to_float(long long x)
+{
+ unsigned int lo = (unsigned int)x;
+ double result = lo;
+ result += ((int)(x >> 32)) * 4294967296.0;
+ return result;
+}
+static _inline double rpy_cast_ulonglong_to_float(unsigned long long x)
+{
+ unsigned int lo = (unsigned int)x;
+ double result = lo;
+ result += ((unsigned int)(x >> 32)) * 4294967296.0;
+ return result;
+}
+#else
+# define rpy_cast_longlong_to_float(x) ((double)(x))
+# define rpy_cast_ulonglong_to_float(x) ((double)(x))
+#endif
+
#ifdef HAVE_LONG_LONG
#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x)
#define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x)
From pypy.commits at gmail.com Thu Aug 4 06:45:47 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Thu, 04 Aug 2016 03:45:47 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: moved setup_once for the vector
extension, debug_print when NotAVectorizableLoop is raised
Message-ID: <57a31cdb.4675c20a.6c840.4bf3@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86011:613b2a914410
Date: 2016-08-04 12:44 +0200
http://bitbucket.org/pypy/pypy/changeset/613b2a914410/
Log: moved setup_once for the vector extension, debug_print when
NotAVectorizableLoop is raised
diff --git a/rpython/jit/backend/llsupport/vector_ext.py b/rpython/jit/backend/llsupport/vector_ext.py
--- a/rpython/jit/backend/llsupport/vector_ext.py
+++ b/rpython/jit/backend/llsupport/vector_ext.py
@@ -8,6 +8,9 @@
failnbail_transformation)
from rpython.jit.metainterp.jitexc import NotAVectorizeableLoop
from rpython.rlib.objectmodel import we_are_translated
+from rpython.rtyper.lltypesystem.lloperation import llop
+from rpython.rtyper.lltypesystem import lltype
+from rpython.rlib.debug import debug_print
class TypeRestrict(object):
ANY_TYPE = '\x00'
@@ -192,11 +195,11 @@
continue
curvecinfo = forwarded_vecinfo(arg)
if curvecinfo.bytesize != bytesize:
- raise NotAVectorizeableLoop("op match size first type failed %d != %d" % \
- (curvecinfo.bytesize != bytesize))
+ debug_print("op match size first type failed")
+ raise NotAVectorizeableLoop
if curvecinfo.datatype != datatype:
- raise NotAVectorizeableLoop("op match size first type failed (datatype). %s != %s" % \
- (curvecinfo.datatype != datatype))
+ debug_print("op match size first type failed (datatype)")
+ raise NotAVectorizeableLoop
return None
TR_ANY = TypeRestrict()
diff --git a/rpython/jit/metainterp/jitexc.py b/rpython/jit/metainterp/jitexc.py
--- a/rpython/jit/metainterp/jitexc.py
+++ b/rpython/jit/metainterp/jitexc.py
@@ -62,10 +62,8 @@
self.red_int, self.red_ref, self.red_float)
class NotAVectorizeableLoop(JitException):
- def __init__(self, msg=""):
- self.msg = msg
def __str__(self):
- return 'NotAVectorizeableLoop(%s)' % self.msg
+ return 'NotAVectorizeableLoop()'
class NotAProfitableLoop(JitException):
def __str__(self):
diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
--- a/rpython/jit/metainterp/optimizeopt/vector.py
+++ b/rpython/jit/metainterp/optimizeopt/vector.py
@@ -26,6 +26,7 @@
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.debug import debug_print, debug_start, debug_stop
from rpython.rlib.jit import Counters
+from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.jit.backend.llsupport.symbolic import (WORD as INT_WORD,
SIZEOF_FLOAT as FLOAT_WORD)
@@ -146,9 +147,8 @@
#
info.label_op = loop.label
return info, loop.finaloplist(jitcell_token=jitcell_token, reset_label_token=False)
- except NotAVectorizeableLoop as e:
+ except NotAVectorizeableLoop:
debug_stop("vec-opt-loop")
- debug_print("failed to vectorize loop. reason: %s" % e.msg)
# vectorization is not possible
return loop_info, version.loop.finaloplist()
except NotAProfitableLoop:
@@ -160,8 +160,6 @@
debug_stop("vec-opt-loop")
debug_print("failed to vectorize loop. THIS IS A FATAL ERROR!")
if we_are_translated():
- from rpython.rtyper.lltypesystem import lltype
- from rpython.rtyper.lltypesystem.lloperation import llop
llop.debug_print_traceback(lltype.Void)
else:
raise
@@ -230,12 +228,17 @@
self.linear_find_smallest_type(loop)
byte_count = self.smallest_type_bytes
vsize = self.vector_ext.vec_size()
- if vsize == 0 or byte_count == 0 or loop.label.getopnum() != rop.LABEL:
- # stop, there is no chance to vectorize this trace
+ # stop, there is no chance to vectorize this trace
# we cannot optimize normal traces (if there is no label)
- raise NotAVectorizeableLoop("vsize %d byte_count %d not label? %d" % \
- (vsize, byte_count, loop.label.getopnum() != rop.LABEL))
-
+ if vsize == 0:
+ debug_print("vector size is zero")
+ raise NotAVectorizeableLoop
+ if byte_count == 0:
+ debug_print("could not find smallest type")
+ raise NotAVectorizeableLoop
+ if loop.label.getopnum() != rop.LABEL:
+ debug_print("not a loop, can only vectorize loops")
+ raise NotAVectorizeableLoop
# find index guards and move to the earliest position
graph = self.analyse_index_calculations(loop)
if graph is not None:
@@ -432,7 +435,8 @@
intersecting edges.
"""
if len(self.packset.packs) == 0:
- raise NotAVectorizeableLoop("packset is empty")
+ debug_print("packset is empty")
+ raise NotAVectorizeableLoop
i = 0
j = 0
end_ij = len(self.packset.packs)
@@ -664,7 +668,8 @@
if forward and origin_pack.is_accumulating():
# in this case the splitted accumulator must
# be combined. This case is not supported
- raise NotAVectorizeableLoop("splitted accum must be flushed here (not supported)")
+ debug_print("splitted accum must be flushed here (not supported)")
+ raise NotAVectorizeableLoop
#
if self.contains_pair(lnode, rnode):
return None
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -1857,7 +1857,8 @@
self.jitlog.setup_once()
debug_print(self.jit_starting_line)
self.cpu.setup_once()
- self.cpu.vector_ext.setup_once(self.cpu.assembler)
+ if self.cpu.vector_ext:
+ self.cpu.vector_ext.setup_once(self.cpu.assembler)
if not self.profiler.initialized:
self.profiler.start()
self.profiler.initialized = True
From pypy.commits at gmail.com Thu Aug 4 11:15:26 2016
From: pypy.commits at gmail.com (rlamy)
Date: Thu, 04 Aug 2016 08:15:26 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Fix tests for compatibility with
CPython, which makes them pass on this branch as well
Message-ID: <57a35c0e.262ec20a.1424d.b174@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86013:2467b1a9d1ba
Date: 2016-08-04 16:14 +0100
http://bitbucket.org/pypy/pypy/changeset/2467b1a9d1ba/
Log: Fix tests for compatibility with CPython, which makes them pass on
this branch as well
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -970,7 +970,6 @@
raises(TypeError, setattr, list, 'foobar', 42)
raises(TypeError, delattr, dict, 'keys')
raises(TypeError, 'int.__dict__["a"] = 1')
- raises(TypeError, 'int.__dict__.clear()')
def test_nontype_in_mro(self):
class OldStyle:
@@ -1028,10 +1027,9 @@
pass
a = A()
+ d = A.__dict__
A.x = 1
- assert A.__dict__["x"] == 1
- A.__dict__['x'] = 5
- assert A.x == 5
+ assert d["x"] == 1
def test_we_already_got_one_1(self):
# Issue #2079: highly obscure: CPython complains if we say
From pypy.commits at gmail.com Thu Aug 4 11:30:50 2016
From: pypy.commits at gmail.com (rlamy)
Date: Thu, 04 Aug 2016 08:30:50 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Do not attempt to modify a class
dict, use setattr instead
Message-ID: <57a35faa.c3881c0a.e7298.7567@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86014:4244afa6cdb0
Date: 2016-08-04 16:29 +0100
http://bitbucket.org/pypy/pypy/changeset/4244afa6cdb0/
Log: Do not attempt to modify a class dict, use setattr instead
diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py
--- a/pypy/module/cppyy/pythonify.py
+++ b/pypy/module/cppyy/pythonify.py
@@ -175,7 +175,7 @@
"__new__" : make_new(class_name),
}
pycppclass = metacpp(class_name, _drop_cycles(bases), d)
-
+
# cache result early so that the class methods can find the class itself
setattr(scope, final_class_name, pycppclass)
@@ -192,13 +192,10 @@
for dm_name in cppclass.get_datamember_names():
cppdm = cppclass.get_datamember(dm_name)
- # here, setattr() can not be used, because a data member can shadow one in
- # its base class, resulting in the __set__() of its base class being called
- # by setattr(); so, store directly on the dictionary
- pycppclass.__dict__[dm_name] = cppdm
+ setattr(pycppclass, dm_name, cppdm)
import cppyy
if cppyy._is_static(cppdm): # TODO: make this a method of cppdm
- metacpp.__dict__[dm_name] = cppdm
+ setattr(metacpp, dm_name, cppdm)
# the call to register will add back-end specific pythonizations and thus
# needs to run first, so that the generic pythonizations can use them
@@ -413,7 +410,7 @@
lib = cppyy._load_dictionary(name)
_loaded_dictionaries[name] = lib
return lib
-
+
def _init_pythonify():
# cppyy should not be loaded at the module level, as that will trigger a
# call to space.getbuiltinmodule(), which will cause cppyy to be loaded
From pypy.commits at gmail.com Thu Aug 4 12:06:33 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 04 Aug 2016 09:06:33 -0700 (PDT)
Subject: [pypy-commit] pypy default: Issue #2363: extracted a unit test that
fails
Message-ID: <57a36809.a427c20a.88889.cbb3@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86015:1d29fd069ef0
Date: 2016-08-04 17:40 +0200
http://bitbucket.org/pypy/pypy/changeset/1d29fd069ef0/
Log: Issue #2363: extracted a unit test that fails
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py
--- a/rpython/memory/gc/test/test_object_pinning.py
+++ b/rpython/memory/gc/test/test_object_pinning.py
@@ -1,6 +1,7 @@
import py
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC, WORD
+from rpython.memory.gc.incminimark import GCFLAG_VISITED
from test_direct import BaseDirectGCTest
T = lltype.GcForwardReference()
@@ -981,3 +982,56 @@
self.gc.major_collection_step() # should not crash reading 'ptr1'!
del self.gc.TEST_VISIT_SINGLE_STEP
+
+
+ def test_pin_bug2(self):
+ #
+ # * we have an old object A that points to a pinned object B
+ #
+ # * we unpin B
+ #
+ # * the next minor_collection() is done in STATE_MARKING==1
+ # when the object A is already black
+ #
+ # * _minor_collection() => _visit_old_objects_pointing_to_pinned()
+ # which will move the now-unpinned B out of the nursery, to B'
+ #
+ # At that point we need to take care of colors, otherwise we
+ # get a black object (A) pointing to a white object (B'),
+ # which must never occur.
+ #
+ ptrA = self.malloc(T)
+ ptrA.someInt = 42
+ adrA = llmemory.cast_ptr_to_adr(ptrA)
+ res = self.gc.pin(adrA)
+ assert res
+
+ ptrC = self.malloc(S)
+ self.stackroots.append(ptrC)
+
+ ptrB = self.malloc(S)
+ ptrB.data = ptrA
+ self.stackroots.append(ptrB)
+
+ self.gc.collect()
+ ptrB = self.stackroots[-1] # now old and outside the nursery
+ ptrC = self.stackroots[-2] # another random old object, traced later
+ adrB = llmemory.cast_ptr_to_adr(ptrB)
+
+ self.gc.minor_collection()
+ assert self.gc.gc_state == self.STATE_SCANNING
+ self.gc.major_collection_step()
+ assert self.gc.gc_state == self.STATE_MARKING
+ assert not (self.gc.header(adrB).tid & GCFLAG_VISITED) # not black yet
+
+ self.gc.TEST_VISIT_SINGLE_STEP = True
+ self.gc.major_collection_step()
+ assert self.gc.gc_state == self.STATE_MARKING
+ assert self.gc.header(adrB).tid & GCFLAG_VISITED # now black
+ # but ptrC is not traced yet, which is why we're still in STATE_MARKING
+ assert self.gc.old_objects_pointing_to_pinned.tolist() == [adrB]
+
+ self.gc.unpin(adrA)
+
+ self.gc.DEBUG = 2
+ self.gc.minor_collection()
From pypy.commits at gmail.com Thu Aug 4 12:06:35 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 04 Aug 2016 09:06:35 -0700 (PDT)
Subject: [pypy-commit] pypy default: Issue #2363: fix. The issue was a
"black->white" rare case in the
Message-ID: <57a3680b.2916c20a.891ef.cc79@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86016:93a5d95ec126
Date: 2016-08-04 18:06 +0200
http://bitbucket.org/pypy/pypy/changeset/93a5d95ec126/
Log: Issue #2363: fix. The issue was a "black->white" rare case in the
incremental GC, which is forbidden.
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1633,6 +1633,14 @@
# have been modified and need rescanning.
self.old_objects_pointing_to_young.foreach(
self._add_to_more_objects_to_trace, None)
+ # Old black objects pointing to pinned objects that may no
+ # longer be pinned now: careful,
+ # _visit_old_objects_pointing_to_pinned() will move the
+ # previously-pinned object, and that creates a white object.
+ # We prevent the "black->white" situation by forcing the
+ # old black object to become gray again.
+ self.old_objects_pointing_to_pinned.foreach(
+ self._add_to_more_objects_to_trace_if_black, None)
#
# First, find the roots that point to young objects. All nursery
# objects found are copied out of the nursery, and the occasional
@@ -2144,6 +2152,10 @@
self.header(obj).tid &= ~GCFLAG_VISITED
self.more_objects_to_trace.append(obj)
+ def _add_to_more_objects_to_trace_if_black(self, obj, ignored):
+ if self.header(obj).tid & GCFLAG_VISITED:
+ self._add_to_more_objects_to_trace(obj, ignored)
+
def minor_and_major_collection(self):
# First, finish the current major gc, if there is one in progress.
# This is a no-op if the gc_state is already STATE_SCANNING.
From pypy.commits at gmail.com Thu Aug 4 12:08:41 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Thu, 04 Aug 2016 09:08:41 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: encode the jitdrivers name in
start_trace, jitlog version bump
Message-ID: <57a36889.c15e1c0a.917d6.80a7@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86017:74b2ed7be48b
Date: 2016-08-04 16:32 +0200
http://bitbucket.org/pypy/pypy/changeset/74b2ed7be48b/
Log: encode the jitdrivers name in start_trace, jitlog version bump
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -1051,8 +1051,9 @@
metainterp_sd = metainterp.staticdata
jitdriver_sd = metainterp.jitdriver_sd
#
+ jd_name = jitdriver_sd.jitdriver.name
metainterp_sd.jitlog.start_new_trace(metainterp_sd,
- faildescr=resumekey, entry_bridge=False)
+ faildescr=resumekey, entry_bridge=False, jd_name=jd_name)
#
if isinstance(resumekey, ResumeAtPositionDescr):
inline_short_preamble = False
diff --git a/rpython/rlib/rjitlog/rjitlog.py b/rpython/rlib/rjitlog/rjitlog.py
--- a/rpython/rlib/rjitlog/rjitlog.py
+++ b/rpython/rlib/rjitlog/rjitlog.py
@@ -212,7 +212,7 @@
return method
return decor
-JITLOG_VERSION = 1
+JITLOG_VERSION = 2
JITLOG_VERSION_16BIT_LE = struct.pack("
Author: Richard Plangger
Branch: py3.5-async
Changeset: r86018:d108f2d1bd0d
Date: 2016-08-04 18:07 +0200
http://bitbucket.org/pypy/pypy/changeset/d108f2d1bd0d/
Log: translation fixes batch 1
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -668,6 +668,8 @@
# TODO
ops.BUILD_LIST_FROM_ARG: 1,
+ # TODO
+ ops.LOAD_CLASSDEREF: 1,
}
diff --git a/pypy/interpreter/pyparser/pytokenizer.py b/pypy/interpreter/pyparser/pytokenizer.py
--- a/pypy/interpreter/pyparser/pytokenizer.py
+++ b/pypy/interpreter/pyparser/pytokenizer.py
@@ -273,17 +273,11 @@
elif token == 'async': # async token, look ahead
#ahead token
if pos < max:
- as_pseudomatch = pseudoDFA.recognize(line, pos)
- as_start = whiteSpaceDFA.recognize(line, pos)
- if as_start < 0:
- as_start = pos
- as_end = as_pseudomatch
-
- if as_start == as_end:
- raise TokenError("Unknown character", line,
- lnum, as_start + 1, token_list)
-
- ahead_token = line[as_start:as_end]
+ async_end = pseudoDFA.recognize(line, pos)
+ assert async_end >= 3
+ async_start = async_end - 3
+ assert async_start >= 0
+ ahead_token = line[async_start:async_end]
if ahead_token == 'def':
async_def = True
async_def_indent = indents[-1]
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
--- a/pypy/objspace/std/memoryobject.py
+++ b/pypy/objspace/std/memoryobject.py
@@ -226,10 +226,11 @@
size = rffi.sizeof(rffi.VOIDP)
return size
- def descr_cast(self, space, w_args, **w_kwds):
- self._check_released(space)
- newitemsize = self.get_native_fmtchar(w_args._val(w_args))
- mv = W_MemoryView(self.buf, w_args._val(w_args), newitemsize)
+ def descr_cast(self, space, w_args, w_kwds):
+ # XXX fixme. does not do anything near cpython (see memoryobjet.c memory_cast)
+ #self._check_released(space)
+ #newitemsize = self.get_native_fmtchar(w_args._val(w_args))
+ return W_MemoryView(self.buf, self.format, self.itemsize)
return mv
From pypy.commits at gmail.com Thu Aug 4 13:29:22 2016
From: pypy.commits at gmail.com (rlamy)
Date: Thu, 04 Aug 2016 10:29:22 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Add failing cpyext test clarifying
the expected behaviour of type->tp_dict
Message-ID: <57a37b72.45c8c20a.3d264.4834@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86019:332fe6be115d
Date: 2016-08-04 18:28 +0100
http://bitbucket.org/pypy/pypy/changeset/332fe6be115d/
Log: Add failing cpyext test clarifying the expected behaviour of
type->tp_dict
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -277,9 +277,23 @@
args->ob_type->tp_dict, "copy");
Py_INCREF(method);
return method;
- ''')])
+ '''),
+ ("get_type_dict", "METH_O",
+ '''
+ PyObject* value = args->ob_type->tp_dict;
+ if (value == NULL) value = Py_None;
+ Py_INCREF(value);
+ return value;
+ '''),
+ ])
obj = foo.new()
assert module.read_tp_dict(obj) == foo.fooType.copy
+ assert type(module.get_type_dict(obj)) is dict
+ d = module.get_type_dict(1)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert int._some_attribute == 1
+ del d["_some_attribute"]
def test_custom_allocation(self):
foo = self.import_module("foo")
From pypy.commits at gmail.com Thu Aug 4 16:09:16 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 04 Aug 2016 13:09:16 -0700 (PDT)
Subject: [pypy-commit] pypy default: add failing test - cpyext uses a
	buffered FILE* but W_File uses a non-buffered int fid
Message-ID: <57a3a0ec.041f1c0a.342ac.d227@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r86021:700c5913e7ea
Date: 2016-08-04 21:35 +0300
http://bitbucket.org/pypy/pypy/changeset/700c5913e7ea/
Log: add failing test - cpyext uses a buffered FILE* but W_File uses a
 non-buffered int fid
diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py
--- a/pypy/module/cpyext/pyfile.py
+++ b/pypy/module/cpyext/pyfile.py
@@ -1,6 +1,6 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
- cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers, fdopen)
+ cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers, c_fdopen)
from pypy.module.cpyext.pyobject import PyObject
from pypy.module.cpyext.object import Py_PRINT_RAW
from pypy.interpreter.error import (OperationError, oefmt,
@@ -64,7 +64,7 @@
if (fd < 0 or not mode or mode[0] not in ['r', 'w', 'a', 'U'] or
('U' in mode and ('w' in mode or 'a' in mode))):
raise oefmt(space.w_IOError, 'invalid fileno or mode')
- ret = fdopen(fd, mode)
+ ret = c_fdopen(fd, mode)
if not ret:
raise exception_from_saved_errno(space, space.w_IOError)
return ret
diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py
--- a/pypy/module/cpyext/test/test_pyfile.py
+++ b/pypy/module/cpyext/test/test_pyfile.py
@@ -1,5 +1,4 @@
from pypy.conftest import option
-from pypy.module.cpyext.api import fopen, fclose, fwrite
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.object import Py_PRINT_RAW
@@ -133,6 +132,15 @@
return PyLong_FromLong(0);
return PyLong_FromLong(ftell(fp));
"""),
+ ("read_10", "METH_O",
+ """
+ char s[10];
+ FILE * fp = PyFile_AsFile(args);
+ if (fp == NULL)
+ return PyLong_FromLong(0);
+ fread(s, 1, 10, fp);
+ return PyLong_FromLong(ftell(fp));
+ """),
])
filename = self.udir + "/_test_file"
with open(filename, 'w') as fid:
@@ -142,5 +150,12 @@
t_py = fid.tell()
assert t_py == 80
t_c = module.get_c_tell(fid)
- assert t_c == t_py
+ assert t_c == t_py
+ print '-------- tell ',t_c
+ t_c = module.read_10(fid)
+ assert t_c == t_py + 10
+ print '-------- tell ',t_c
+ t_py = fid.tell()
+ assert t_c == t_py, 'after a fread, c level ftell(fp) %d but PyFile.tell() %d' % (t_c, t_py)
+
From pypy.commits at gmail.com Thu Aug 4 16:09:18 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 04 Aug 2016 13:09:18 -0700 (PDT)
Subject: [pypy-commit] pypy default: refactor - reuse externals from rfile
Message-ID: <57a3a0ee.53b81c0a.e11d7.d2ea@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r86022:3c04fe2c5184
Date: 2016-08-04 21:36 +0300
http://bitbucket.org/pypy/pypy/changeset/3c04fe2c5184/
Log: refactor - reuse externals from rfile
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -11,6 +11,9 @@
from rpython.rtyper.annlowlevel import llhelper
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
from rpython.rlib.objectmodel import dont_inline
+from rpython.rlib.rfile import (FILEP, c_fread, c_fclose, c_fwrite,
+ c_fdopen, c_fileno,
+ c_fopen)# for tests
from rpython.translator import cdir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.gensupp import NameManager
@@ -85,44 +88,32 @@
assert CONST_WSTRING == rffi.CWCHARP
# FILE* interface
-FILEP = rffi.COpaquePtr('FILE')
if sys.platform == 'win32':
dash = '_'
else:
dash = ''
-fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT)
-fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
-fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING],
- FILEP, save_err=rffi.RFFI_SAVE_ERRNO)
-_fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
def fclose(fp):
- if not is_valid_fd(fileno(fp)):
+ if not is_valid_fd(c_fileno(fp)):
return -1
- return _fclose(fp)
+ return c_fclose(fp)
-_fwrite = rffi.llexternal('fwrite',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fwrite(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fwrite(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fwrite(buf, sz, n, fp)
-_fread = rffi.llexternal('fread',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fread(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fread(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fread(buf, sz, n, fp)
_feof = rffi.llexternal('feof', [FILEP], rffi.INT)
def feof(fp):
- validate_fd(fileno(fp))
+ validate_fd(c_fileno(fp))
return _feof(fp)
def is_valid_fp(fp):
- return is_valid_fd(fileno(fp))
+ return is_valid_fd(c_fileno(fp))
pypy_decl = 'pypy_decl.h'
diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py
--- a/pypy/module/cpyext/test/test_eval.py
+++ b/pypy/module/cpyext/test/test_eval.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.eval import (
Py_single_input, Py_file_input, Py_eval_input, PyCompilerFlags)
-from pypy.module.cpyext.api import fopen, fclose, fileno, Py_ssize_tP
+from pypy.module.cpyext.api import c_fopen, c_fclose, c_fileno, Py_ssize_tP
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.astcompiler import consts
from rpython.tool.udir import udir
@@ -130,19 +130,19 @@
def test_run_file(self, space, api):
filepath = udir / "cpyext_test_runfile.py"
filepath.write("raise ZeroDivisionError")
- fp = fopen(str(filepath), "rb")
+ fp = c_fopen(str(filepath), "rb")
filename = rffi.str2charp(str(filepath))
w_globals = w_locals = space.newdict()
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_ZeroDivisionError
api.PyErr_Clear()
# try again, but with a closed file
- fp = fopen(str(filepath), "rb")
- os.close(fileno(fp))
+ fp = c_fopen(str(filepath), "rb")
+ os.close(c_fileno(fp))
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_IOError
api.PyErr_Clear()
From pypy.commits at gmail.com Thu Aug 4 16:09:19 2016
From: pypy.commits at gmail.com (mattip)
Date: Thu, 04 Aug 2016 13:09:19 -0700 (PDT)
Subject: [pypy-commit] pypy default: fix for 700c5913e7ea - force unbuffered
FILE* use
Message-ID: <57a3a0ef.cb7f1c0a.515df.cf21@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r86023:01f46383f3ce
Date: 2016-08-04 21:37 +0300
http://bitbucket.org/pypy/pypy/changeset/01f46383f3ce/
Log: fix for 700c5913e7ea - force unbuffered FILE* use
diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py
--- a/pypy/module/cpyext/pyfile.py
+++ b/pypy/module/cpyext/pyfile.py
@@ -1,4 +1,5 @@
from rpython.rtyper.lltypesystem import rffi, lltype
+from rpython.rlib.rfile import c_setvbuf, _IONBF
from pypy.module.cpyext.api import (
cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers, c_fdopen)
from pypy.module.cpyext.pyobject import PyObject
@@ -67,8 +68,9 @@
ret = c_fdopen(fd, mode)
if not ret:
raise exception_from_saved_errno(space, space.w_IOError)
+ # XXX fix this once use-file-star-for-file lands
+ c_setvbuf(ret, lltype.nullptr(rffi.CCHARP.TO), _IONBF, 0)
return ret
-
@cpython_api([FILEP, CONST_STRING, CONST_STRING, rffi.VOIDP], PyObject)
def PyFile_FromFile(space, fp, name, mode, close):
From pypy.commits at gmail.com Fri Aug 5 02:30:29 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 04 Aug 2016 23:30:29 -0700 (PDT)
Subject: [pypy-commit] pypy default: Found out why we couldn't call
_add_to_more_objects_to_trace_*if_black*
Message-ID: <57a43285.2916c20a.891ef.a995@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86024:9adb8eec4ac2
Date: 2016-08-05 08:32 +0200
http://bitbucket.org/pypy/pypy/changeset/9adb8eec4ac2/
Log: Found out why we couldn't call
_add_to_more_objects_to_trace_*if_black* here. Potential obscure fix
for other cases too.
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1632,7 +1632,7 @@
# This is because these are precisely the old objects that
# have been modified and need rescanning.
self.old_objects_pointing_to_young.foreach(
- self._add_to_more_objects_to_trace, None)
+ self._add_to_more_objects_to_trace_if_black, None)
# Old black objects pointing to pinned objects that may no
# longer be pinned now: careful,
# _visit_old_objects_pointing_to_pinned() will move the
@@ -2269,6 +2269,11 @@
if (not self.objects_to_trace.non_empty() and
not self.more_objects_to_trace.non_empty()):
#
+ # First, 'prebuilt_root_objects' might have grown since
+ # we scanned it in collect_roots() (rare case). Rescan.
+ self.collect_nonstack_roots()
+ self.visit_all_objects()
+ #
if self.rrc_enabled:
self.rrc_major_collection_trace()
#
@@ -2449,21 +2454,30 @@
return nobjects
- def collect_roots(self):
- # Collect all roots. Starts from all the objects
- # from 'prebuilt_root_objects'.
+ def collect_nonstack_roots(self):
+ # Non-stack roots: first, the objects from 'prebuilt_root_objects'
self.prebuilt_root_objects.foreach(self._collect_obj, None)
#
- # Add the roots from the other sources.
+ # Add the roots from static prebuilt non-gc structures
self.root_walker.walk_roots(
- IncrementalMiniMarkGC._collect_ref_stk, # stack roots
- IncrementalMiniMarkGC._collect_ref_stk, # static in prebuilt non-gc structures
+ None,
+ IncrementalMiniMarkGC._collect_ref_stk,
None) # we don't need the static in all prebuilt gc objects
#
# If we are in an inner collection caused by a call to a finalizer,
# the 'run_finalizers' objects also need to be kept alive.
self.enum_pending_finalizers(self._collect_obj, None)
+ def collect_roots(self):
+ # Collect all roots. Starts from the non-stack roots.
+ self.collect_nonstack_roots()
+ #
+ # Add the stack roots.
+ self.root_walker.walk_roots(
+ IncrementalMiniMarkGC._collect_ref_stk, # stack roots
+ None,
+ None)
+
def enumerate_all_roots(self, callback, arg):
self.prebuilt_root_objects.foreach(callback, arg)
MovingGCBase.enumerate_all_roots(self, callback, arg)
From pypy.commits at gmail.com Fri Aug 5 04:00:52 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Fri, 05 Aug 2016 01:00:52 -0700 (PDT)
Subject: [pypy-commit] pypy resource_warning: merge default
Message-ID: <57a447b4.c5aa1c0a.db8ad.7d07@mx.google.com>
Author: Carl Friedrich Bolz
Branch: resource_warning
Changeset: r86025:9f2557766bf3
Date: 2016-08-05 10:00 +0200
http://bitbucket.org/pypy/pypy/changeset/9f2557766bf3/
Log: merge default
diff too long, truncating to 2000 out of 77823 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,10 @@
5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
+80ef432a32d9baa4b3c5a54c215e8ebe499f6374 release-5.1.2
+40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
+40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
+c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3
+7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -43,17 +43,17 @@
Samuele Pedroni
Matti Picus
Alex Gaynor
+ Philip Jenvey
Brian Kearns
- Philip Jenvey
+ Ronan Lamy
Michael Hudson
- Ronan Lamy
+ Manuel Jacob
David Schneider
- Manuel Jacob
Holger Krekel
Christian Tismer
Hakan Ardo
+ Richard Plangger
Benjamin Peterson
- Richard Plangger
Anders Chrigstrom
Eric van Riet Paap
Wim Lavrijsen
@@ -93,9 +93,9 @@
stian
Jan de Mooij
Tyler Wade
+ Vincent Legoll
Michael Foord
Stephan Diehl
- Vincent Legoll
Stefan Schwarzer
Valentino Volonghi
Tomek Meka
@@ -104,30 +104,34 @@
Bruno Gola
David Malcolm
Jean-Paul Calderone
+ Mark Young
Timo Paulssen
Squeaky
+ Devin Jeanpierre
Marius Gedminas
Alexandre Fayolle
Simon Burton
+ Stefano Rivera
Martin Matusiak
Konstantin Lopuhin
Wenzhu Man
John Witulski
Laurence Tratt
+ Raffael Tfirst
Ivan Sichmann Freitas
Greg Price
Dario Bertini
- Stefano Rivera
Mark Pearse
Simon Cross
+ Edd Barrett
Andreas Stührk
- Edd Barrett
+ Tobias Pape
Jean-Philippe St. Pierre
Guido van Rossum
Pavel Vinogradov
+ Spenser Bauman
Jeremy Thurgood
Paweł Piotr Przeradowski
- Spenser Bauman
Paul deGrandis
Ilya Osadchiy
marky1991
@@ -139,7 +143,6 @@
Georg Brandl
Bert Freudenberg
Stian Andreassen
- Tobias Pape
Wanja Saatkamp
Gerald Klix
Mike Blume
@@ -155,11 +158,13 @@
Dusty Phillips
Lukas Renggli
Guenter Jantzen
+ William Leslie
Ned Batchelder
Tim Felgentreff
Anton Gulenko
Amit Regmi
Ben Young
+ Sergey Matyunin
Nicolas Chauvat
Andrew Durdin
Andrew Chambers
@@ -172,7 +177,7 @@
Gintautas Miliauskas
Michael Twomey
Lucian Branescu Mihaila
- Devin Jeanpierre
+ anatoly techtonik
Gabriel Lavoie
Olivier Dormond
Jared Grubb
@@ -182,7 +187,6 @@
Brian Dorsey
Victor Stinner
Andrews Medina
- anatoly techtonik
Stuart Williams
Jasper Schulz
Christian Hudon
@@ -206,18 +210,17 @@
Alex Perry
Vaibhav Sood
Alan McIntyre
- William Leslie
Alexander Sedov
Attila Gobi
Jasper.Schulz
Christopher Pope
+ Florin Papa
Christian Tismer
Marc Abramowitz
Dan Stromberg
Arjun Naik
Valentina Mukhamedzhanova
Stefano Parmesan
- Mark Young
Alexis Daboville
Jens-Uwe Mager
Carl Meyer
@@ -225,6 +228,7 @@
Pieter Zieschang
Gabriel
Lukas Vacek
+ Kunal Grover
Andrew Dalke
Sylvain Thenault
Jakub Stasiak
@@ -240,7 +244,6 @@
Kristjan Valur Jonsson
David Lievens
Neil Blakey-Milner
- Sergey Matyunin
Lutz Paelike
Lucio Torre
Lars Wassermann
@@ -252,9 +255,11 @@
Artur Lisiecki
Sergey Kishchenko
Ignas Mikalajunas
+ Alecsandru Patrascu
Christoph Gerum
Martin Blais
Lene Wagner
+ Catalin Gabriel Manciu
Tomo Cocoa
Kim Jin Su
Toni Mattis
@@ -266,8 +271,9 @@
Yury V. Zaytsev
Anna Katrina Dominguez
Bobby Impollonia
- timo at eistee.fritz.box
+ Vasantha Ganesh K
Andrew Thompson
+ florinpapa
Yusei Tahara
Aaron Tubbs
Ben Darnell
@@ -293,6 +299,7 @@
Stephan Busemann
Rafał Gałczyński
Matt Bogosian
+ timo
Christian Muirhead
Berker Peksag
James Lan
@@ -305,6 +312,7 @@
Boglarka Vezer
Chris Pressey
Buck Golemon
+ Diana Popa
Konrad Delong
Dinu Gherman
Chris Lambacher
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -85,10 +85,11 @@
pass
def splitline(line, re_word = re.compile(r'[^\s"]\S*|["]["]|["].*?[^\\]["]')):
+ import ast
result = []
for word in re_word.findall(line):
if word.startswith('"'):
- word = eval(word)
+ word = ast.literal_eval(word)
result.append(word)
return result
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
if __name__ == '__main__':
if len(sys.argv) != 2:
+ if len(sys.argv) == 1:
+ # start locally
+ import sshgraphserver
+ sshgraphserver.ssh_graph_server(['LOCAL'])
+ sys.exit(0)
print >> sys.stderr, __doc__
sys.exit(2)
if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
Usage:
sshgraphserver.py hostname [more args for ssh...]
+ sshgraphserver.py LOCAL
This logs in to 'hostname' by passing the arguments on the command-line
to ssh. No further configuration is required: it works for all programs
using the dotviewer library as long as they run on 'hostname' under the
same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
"""
import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
s1 = socket.socket()
s1.bind(('127.0.0.1', socket.INADDR_ANY))
localhost, localport = s1.getsockname()
- remoteport = random.randrange(10000, 20000)
- # ^^^ and just hope there is no conflict
- args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
- args = args + sshargs + ['python -u -c "exec input()"']
- print ' '.join(args[:-1])
+ if sshargs[0] != 'LOCAL':
+ remoteport = random.randrange(10000, 20000)
+ # ^^^ and just hope there is no conflict
+
+ args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+ remoteport, localport)]
+ args = args + sshargs + ['python -u -c "exec input()"']
+ else:
+ remoteport = localport
+ args = ['python', '-u', '-c', 'exec input()']
+
+ print ' '.join(args)
p = subprocess.Popen(args, bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py
--- a/lib-python/2.7/subprocess.py
+++ b/lib-python/2.7/subprocess.py
@@ -834,54 +834,63 @@
c2pread, c2pwrite = None, None
errread, errwrite = None, None
+ ispread = False
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
+ ispread = True
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
+ ispread = True
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
- p2cread = self._make_inheritable(p2cread)
+ p2cread = self._make_inheritable(p2cread, ispread)
# We just duplicated the handle, it has to be closed at the end
to_close.add(p2cread)
if stdin == PIPE:
to_close.add(p2cwrite)
+ ispwrite = False
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
+ ispwrite = True
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
+ ispwrite = True
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
- c2pwrite = self._make_inheritable(c2pwrite)
+ c2pwrite = self._make_inheritable(c2pwrite, ispwrite)
# We just duplicated the handle, it has to be closed at the end
to_close.add(c2pwrite)
if stdout == PIPE:
to_close.add(c2pread)
+ ispwrite = False
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
+ ispwrite = True
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
+ ispwrite = True
elif stderr == STDOUT:
- errwrite = c2pwrite.handle # pass id to not close it
+ errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
- errwrite = self._make_inheritable(errwrite)
+ errwrite = self._make_inheritable(errwrite, ispwrite)
# We just duplicated the handle, it has to be closed at the end
to_close.add(errwrite)
if stderr == PIPE:
@@ -892,13 +901,14 @@
errread, errwrite), to_close
- def _make_inheritable(self, handle):
+ def _make_inheritable(self, handle, close=False):
"""Return a duplicate of handle, which is inheritable"""
dupl = _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
handle, _subprocess.GetCurrentProcess(), 0, 1,
_subprocess.DUPLICATE_SAME_ACCESS)
- # If the initial handle was obtained with CreatePipe, close it.
- if not isinstance(handle, int):
+ # PyPy: If the initial handle was obtained with CreatePipe,
+ # close it.
+ if close:
handle.Close()
return dupl
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "next" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if test_support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
raise MyException
for name, runner, meth_impl, ok, env in specials:
- if name == '__length_hint__' or name == '__sizeof__':
- if not test_support.check_impl_detail():
- continue
-
class X(Checker):
pass
for attr, obj in env.iteritems():
diff --git a/lib-python/2.7/test/test_hash.py b/lib-python/2.7/test/test_hash.py
--- a/lib-python/2.7/test/test_hash.py
+++ b/lib-python/2.7/test/test_hash.py
@@ -174,7 +174,7 @@
class StringlikeHashRandomizationTests(HashRandomizationTests):
if check_impl_detail(pypy=True):
- EMPTY_STRING_HASH = -1
+ EMPTY_STRING_HASH = -2
else:
EMPTY_STRING_HASH = 0
diff --git a/lib-python/2.7/test/test_sys_settrace.py b/lib-python/2.7/test/test_sys_settrace.py
--- a/lib-python/2.7/test/test_sys_settrace.py
+++ b/lib-python/2.7/test/test_sys_settrace.py
@@ -328,8 +328,8 @@
def test_13_genexp(self):
if self.using_gc:
+ gc.enable()
test_support.gc_collect()
- gc.enable()
try:
self.run_test(generator_example)
# issue1265: if the trace function contains a generator,
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v -r v Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after ` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create a integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -320,8 +320,7 @@
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
- def __hash__(self):
- raise TypeError("deque objects are unhashable")
+ __hash__ = None
def __copy__(self):
return self.__class__(self, self.maxlen)
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -199,10 +199,13 @@
return tp._alignmentofinstances()
@builtinify
-def byref(cdata):
+def byref(cdata, offset=0):
# "pointer" is imported at the end of this module to avoid circular
# imports
- return pointer(cdata)
+ ptr = pointer(cdata)
+ if offset != 0:
+ ptr._buffer[0] += offset
+ return ptr
def cdata_from_address(self, address):
# fix the address: turn it into as unsigned, in case it's a negative number
diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py
--- a/lib_pypy/_pypy_interact.py
+++ b/lib_pypy/_pypy_interact.py
@@ -6,7 +6,7 @@
irc_header = "And now for something completely different"
-def interactive_console(mainmodule=None, quiet=False):
+def interactive_console(mainmodule=None, quiet=False, future_flags=0):
# set sys.{ps1,ps2} just before invoking the interactive interpreter. This
# mimics what CPython does in pythonrun.c
if not hasattr(sys, 'ps1'):
@@ -37,15 +37,17 @@
raise ImportError
from pyrepl.simple_interact import run_multiline_interactive_console
except ImportError:
- run_simple_interactive_console(mainmodule)
+ run_simple_interactive_console(mainmodule, future_flags=future_flags)
else:
- run_multiline_interactive_console(mainmodule)
+ run_multiline_interactive_console(mainmodule, future_flags=future_flags)
-def run_simple_interactive_console(mainmodule):
+def run_simple_interactive_console(mainmodule, future_flags=0):
import code
if mainmodule is None:
import __main__ as mainmodule
console = code.InteractiveConsole(mainmodule.__dict__, filename='')
+ if future_flags:
+ console.compile.compiler.flags |= future_flags
# some parts of code.py are copied here because it seems to be impossible
# to start an interactive console without printing at least one line
# of banner
diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py
--- a/lib_pypy/_pypy_irc_topic.py
+++ b/lib_pypy/_pypy_irc_topic.py
@@ -224,23 +224,9 @@
va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat
"""
-from string import ascii_uppercase, ascii_lowercase
-
def rot13(data):
- """ A simple rot-13 encoder since `str.encode('rot13')` was removed from
- Python as of version 3.0. It rotates both uppercase and lowercase letters individually.
- """
- total = []
- for char in data:
- if char in ascii_uppercase:
- index = (ascii_uppercase.find(char) + 13) % 26
- total.append(ascii_uppercase[index])
- elif char in ascii_lowercase:
- index = (ascii_lowercase.find(char) + 13) % 26
- total.append(ascii_lowercase[index])
- else:
- total.append(char)
- return "".join(total)
+ return ''.join(chr(ord(c)+(13 if 'A'<=c.upper()<='M' else
+ -13 if 'N'<=c.upper()<='Z' else 0)) for c in data)
def some_topic():
import time
diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py
--- a/lib_pypy/_pypy_wait.py
+++ b/lib_pypy/_pypy_wait.py
@@ -1,51 +1,22 @@
-from resource import _struct_rusage, struct_rusage
-from ctypes import CDLL, c_int, POINTER, byref
-from ctypes.util import find_library
+from resource import ffi, lib, _make_struct_rusage
__all__ = ["wait3", "wait4"]
-libc = CDLL(find_library("c"))
-c_wait3 = libc.wait3
-c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait3.restype = c_int
-
-c_wait4 = libc.wait4
-c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait4.restype = c_int
-
-def create_struct_rusage(c_struct):
- return struct_rusage((
- float(c_struct.ru_utime),
- float(c_struct.ru_stime),
- c_struct.ru_maxrss,
- c_struct.ru_ixrss,
- c_struct.ru_idrss,
- c_struct.ru_isrss,
- c_struct.ru_minflt,
- c_struct.ru_majflt,
- c_struct.ru_nswap,
- c_struct.ru_inblock,
- c_struct.ru_oublock,
- c_struct.ru_msgsnd,
- c_struct.ru_msgrcv,
- c_struct.ru_nsignals,
- c_struct.ru_nvcsw,
- c_struct.ru_nivcsw))
def wait3(options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait3(byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait3(status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
def wait4(pid, options):
- status = c_int()
- _rusage = _struct_rusage()
- pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage))
+ status = ffi.new("int *")
+ ru = ffi.new("struct rusage *")
+ pid = lib.wait4(pid, status, options, ru)
- rusage = create_struct_rusage(_rusage)
+ rusage = _make_struct_rusage(ru)
- return pid, status.value, rusage
+ return pid, status[0], rusage
diff --git a/lib_pypy/_pypy_winbase_build.py b/lib_pypy/_pypy_winbase_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_build.py
@@ -0,0 +1,91 @@
+# Note: uses the CFFI out-of-line ABI mode. We can't use the API
+# mode because ffi.compile() needs to run the compiler, which
+# needs 'subprocess', which needs 'msvcrt' and '_subprocess',
+# which depend on '_pypy_winbase_cffi' already.
+#
+# Note that if you need to regenerate _pypy_winbase_cffi and
+# can't use a preexisting PyPy to do that, then running this
+# file should work as long as 'subprocess' is not imported
+# by cffi. I had to hack in 'cffi._pycparser' to move an
+#'import subprocess' to the inside of a function. (Also,
+# CPython+CFFI should work as well.)
+#
+# This module supports both msvcrt.py and _subprocess.py.
+
+from cffi import FFI
+
+ffi = FFI()
+
+ffi.set_source("_pypy_winbase_cffi", None)
+
+# ---------- MSVCRT ----------
+
+ffi.cdef("""
+typedef unsigned short wint_t;
+
+int _open_osfhandle(intptr_t osfhandle, int flags);
+intptr_t _get_osfhandle(int fd);
+int _setmode(int fd, int mode);
+int _locking(int fd, int mode, long nbytes);
+
+int _kbhit(void);
+int _getch(void);
+wint_t _getwch(void);
+int _getche(void);
+wint_t _getwche(void);
+int _putch(int);
+wint_t _putwch(wchar_t);
+int _ungetch(int);
+wint_t _ungetwch(wint_t);
+""")
+
+# ---------- SUBPROCESS ----------
+
+ffi.cdef("""
+typedef struct {
+ DWORD cb;
+ char * lpReserved;
+ char * lpDesktop;
+ char * lpTitle;
+ DWORD dwX;
+ DWORD dwY;
+ DWORD dwXSize;
+ DWORD dwYSize;
+ DWORD dwXCountChars;
+ DWORD dwYCountChars;
+ DWORD dwFillAttribute;
+ DWORD dwFlags;
+ WORD wShowWindow;
+ WORD cbReserved2;
+ LPBYTE lpReserved2;
+ HANDLE hStdInput;
+ HANDLE hStdOutput;
+ HANDLE hStdError;
+} STARTUPINFO, *LPSTARTUPINFO;
+
+typedef struct {
+ HANDLE hProcess;
+ HANDLE hThread;
+ DWORD dwProcessId;
+ DWORD dwThreadId;
+} PROCESS_INFORMATION, *LPPROCESS_INFORMATION;
+
+DWORD WINAPI GetVersion(void);
+BOOL WINAPI CreatePipe(PHANDLE, PHANDLE, void *, DWORD);
+BOOL WINAPI CloseHandle(HANDLE);
+HANDLE WINAPI GetCurrentProcess(void);
+BOOL WINAPI DuplicateHandle(HANDLE, HANDLE, HANDLE, LPHANDLE,
+ DWORD, BOOL, DWORD);
+BOOL WINAPI CreateProcessA(char *, char *, void *,
+ void *, BOOL, DWORD, char *,
+ char *, LPSTARTUPINFO, LPPROCESS_INFORMATION);
+DWORD WINAPI WaitForSingleObject(HANDLE, DWORD);
+BOOL WINAPI GetExitCodeProcess(HANDLE, LPDWORD);
+BOOL WINAPI TerminateProcess(HANDLE, UINT);
+HANDLE WINAPI GetStdHandle(DWORD);
+""")
+
+# --------------------
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/_pypy_winbase_cffi.py b/lib_pypy/_pypy_winbase_cffi.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_cffi.py
@@ -0,0 +1,10 @@
+# auto-generated file
+import _cffi_backend
+
+ffi = _cffi_backend.FFI('_pypy_winbase_cffi',
+ _version = 0x2601,
+ _types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x50\x03\x00\x00\x13\x11\x00\x00\x53\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x4F\x03\x00\x00\x4E\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x42\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x52\x03\x00\x00\x04\x01\x00\x00\x00\x01',
+ _globals = (b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x4C\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x49\x23GetStdHandle',0,b'\x00\x00\x3F\x23GetVersion',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x3B\x23WaitForSingleObject',0,b'\x00\x00\x38\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x44\x23_getwch',0,b'\x00\x00\x44\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x46\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x41\x23_ungetwch',0),
+ _struct_unions = ((b'\x00\x00\x00\x4E\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x4F\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x42\x11wShowWindow',b'\x00\x00\x42\x11cbReserved2',b'\x00\x00\x51\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')),
+ _typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x4EPROCESS_INFORMATION',b'\x00\x00\x00\x4FSTARTUPINFO',b'\x00\x00\x00\x42wint_t'),
+)
diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_resource_build.py
@@ -0,0 +1,118 @@
+from cffi import FFI
+
+ffi = FFI()
+
+# Note: we don't directly expose 'struct timeval' or 'struct rlimit'
+
+
+rlimit_consts = '''
+RLIMIT_CPU
+RLIMIT_FSIZE
+RLIMIT_DATA
+RLIMIT_STACK
+RLIMIT_CORE
+RLIMIT_NOFILE
+RLIMIT_OFILE
+RLIMIT_VMEM
+RLIMIT_AS
+RLIMIT_RSS
+RLIMIT_NPROC
+RLIMIT_MEMLOCK
+RLIMIT_SBSIZE
+RLIM_INFINITY
+RUSAGE_SELF
+RUSAGE_CHILDREN
+RUSAGE_BOTH
+'''.split()
+
+rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s)
+ for s in rlimit_consts]
+
+
+ffi.set_source("_resource_cffi", """
+#include
+#include
+#include
+#include
+
+static const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[] = {
+$RLIMIT_CONSTS
+ { NULL, 0 }
+};
+
+#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001)
+
+static double my_utime(struct rusage *input)
+{
+ return doubletime(input->ru_utime);
+}
+
+static double my_stime(struct rusage *input)
+{
+ return doubletime(input->ru_stime);
+}
+
+static int my_getrlimit(int resource, long long result[2])
+{
+ struct rlimit rl;
+ if (getrlimit(resource, &rl) == -1)
+ return -1;
+ result[0] = rl.rlim_cur;
+ result[1] = rl.rlim_max;
+ return 0;
+}
+
+static int my_setrlimit(int resource, long long cur, long long max)
+{
+ struct rlimit rl;
+ rl.rlim_cur = cur & RLIM_INFINITY;
+ rl.rlim_max = max & RLIM_INFINITY;
+ return setrlimit(resource, &rl);
+}
+
+""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts)))
+
+
+ffi.cdef("""
+
+#define RLIM_NLIMITS ...
+
+const struct my_rlimit_def {
+ const char *name;
+ long long value;
+} my_rlimit_consts[];
+
+struct rusage {
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+ ...;
+};
+
+static double my_utime(struct rusage *);
+static double my_stime(struct rusage *);
+void getrusage(int who, struct rusage *result);
+int my_getrlimit(int resource, long long result[2]);
+int my_setrlimit(int resource, long long cur, long long max);
+
+int wait3(int *status, int options, struct rusage *rusage);
+int wait4(int pid, int *status, int options, struct rusage *rusage);
+""")
+
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/_subprocess.py b/lib_pypy/_subprocess.py
--- a/lib_pypy/_subprocess.py
+++ b/lib_pypy/_subprocess.py
@@ -4,151 +4,105 @@
subprocess module on Windows.
"""
+import sys
+if sys.platform != 'win32':
+ raise ImportError("The '_subprocess' module is only available on Windows")
# Declare external Win32 functions
-import ctypes
-
-_kernel32 = ctypes.WinDLL('kernel32')
-
-_CloseHandle = _kernel32.CloseHandle
-_CloseHandle.argtypes = [ctypes.c_int]
-_CloseHandle.restype = ctypes.c_int
-
-_CreatePipe = _kernel32.CreatePipe
-_CreatePipe.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
- ctypes.c_void_p, ctypes.c_int]
-_CreatePipe.restype = ctypes.c_int
-
-_GetCurrentProcess = _kernel32.GetCurrentProcess
-_GetCurrentProcess.argtypes = []
-_GetCurrentProcess.restype = ctypes.c_int
+from _pypy_winbase_cffi import ffi as _ffi
+_kernel32 = _ffi.dlopen('kernel32')
GetVersion = _kernel32.GetVersion
-GetVersion.argtypes = []
-GetVersion.restype = ctypes.c_int
-_DuplicateHandle = _kernel32.DuplicateHandle
-_DuplicateHandle.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,
- ctypes.POINTER(ctypes.c_int),
- ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_DuplicateHandle.restype = ctypes.c_int
-
-_WaitForSingleObject = _kernel32.WaitForSingleObject
-_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_uint]
-_WaitForSingleObject.restype = ctypes.c_int
-
-_GetExitCodeProcess = _kernel32.GetExitCodeProcess
-_GetExitCodeProcess.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
-_GetExitCodeProcess.restype = ctypes.c_int
-
-_TerminateProcess = _kernel32.TerminateProcess
-_TerminateProcess.argtypes = [ctypes.c_int, ctypes.c_int]
-_TerminateProcess.restype = ctypes.c_int
-
-_GetStdHandle = _kernel32.GetStdHandle
-_GetStdHandle.argtypes = [ctypes.c_int]
-_GetStdHandle.restype = ctypes.c_int
-
-class _STARTUPINFO(ctypes.Structure):
- _fields_ = [('cb', ctypes.c_int),
- ('lpReserved', ctypes.c_void_p),
- ('lpDesktop', ctypes.c_char_p),
- ('lpTitle', ctypes.c_char_p),
- ('dwX', ctypes.c_int),
- ('dwY', ctypes.c_int),
- ('dwXSize', ctypes.c_int),
- ('dwYSize', ctypes.c_int),
- ('dwXCountChars', ctypes.c_int),
- ('dwYCountChars', ctypes.c_int),
- ("dwFillAttribute", ctypes.c_int),
- ("dwFlags", ctypes.c_int),
- ("wShowWindow", ctypes.c_short),
- ("cbReserved2", ctypes.c_short),
- ("lpReserved2", ctypes.c_void_p),
- ("hStdInput", ctypes.c_int),
- ("hStdOutput", ctypes.c_int),
- ("hStdError", ctypes.c_int)
- ]
-
-class _PROCESS_INFORMATION(ctypes.Structure):
- _fields_ = [("hProcess", ctypes.c_int),
- ("hThread", ctypes.c_int),
- ("dwProcessID", ctypes.c_int),
- ("dwThreadID", ctypes.c_int)]
-
-_CreateProcess = _kernel32.CreateProcessA
-_CreateProcess.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p,
- ctypes.c_int, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p,
- ctypes.POINTER(_STARTUPINFO), ctypes.POINTER(_PROCESS_INFORMATION)]
-_CreateProcess.restype = ctypes.c_int
-
-del ctypes
# Now the _subprocess module implementation
-from ctypes import c_int as _c_int, byref as _byref, WinError as _WinError
+def _WinError():
+ code, message = _ffi.getwinerror()
+ raise WindowsError(code, message)
-class _handle:
- def __init__(self, handle):
- self.handle = handle
+_INVALID_HANDLE_VALUE = _ffi.cast("HANDLE", -1)
+
+class _handle(object):
+ def __init__(self, c_handle):
+ # 'c_handle' is a cffi cdata of type HANDLE, which is basically 'void *'
+ self.c_handle = c_handle
+ if int(self) != -1:
+ self.c_handle = _ffi.gc(self.c_handle, _kernel32.CloseHandle)
def __int__(self):
- return self.handle
+ return int(_ffi.cast("intptr_t", self.c_handle))
- def __del__(self):
- if self.handle is not None:
- _CloseHandle(self.handle)
+ def __repr__(self):
+ return '<_subprocess.handle %d at 0x%x>' % (int(self), id(self))
def Detach(self):
- handle, self.handle = self.handle, None
- return handle
+ h = int(self)
+ if h != -1:
+ c_handle = self.c_handle
+ self.c_handle = _INVALID_HANDLE_VALUE
+ _ffi.gc(c_handle, None)
+ return h
def Close(self):
- if self.handle not in (-1, None):
- _CloseHandle(self.handle)
- self.handle = None
+ if int(self) != -1:
+ c_handle = self.c_handle
+ self.c_handle = _INVALID_HANDLE_VALUE
+ _ffi.gc(c_handle, None)
+ _kernel32.CloseHandle(c_handle)
def CreatePipe(attributes, size):
- read = _c_int()
- write = _c_int()
+ handles = _ffi.new("HANDLE[2]")
- res = _CreatePipe(_byref(read), _byref(write), None, size)
+ res = _kernel32.CreatePipe(handles, handles + 1, _ffi.NULL, size)
if not res:
raise _WinError()
- return _handle(read.value), _handle(write.value)
+ return _handle(handles[0]), _handle(handles[1])
def GetCurrentProcess():
- return _handle(_GetCurrentProcess())
+ return _handle(_kernel32.GetCurrentProcess())
def DuplicateHandle(source_process, source, target_process, access, inherit, options=0):
- target = _c_int()
+ # CPython: the first three arguments are expected to be integers
+ target = _ffi.new("HANDLE[1]")
- res = _DuplicateHandle(int(source_process), int(source), int(target_process),
- _byref(target),
- access, inherit, options)
+ res = _kernel32.DuplicateHandle(
+ _ffi.cast("HANDLE", source_process),
+ _ffi.cast("HANDLE", source),
+ _ffi.cast("HANDLE", target_process),
+ target, access, inherit, options)
if not res:
raise _WinError()
- return _handle(target.value)
+ return _handle(target[0])
+
+def _z(input):
+ if input is None:
+ return _ffi.NULL
+ if isinstance(input, basestring):
+ return str(input)
+ raise TypeError("string/unicode/None expected, got %r" % (
+ type(input).__name__,))
def CreateProcess(name, command_line, process_attr, thread_attr,
inherit, flags, env, start_dir, startup_info):
- si = _STARTUPINFO()
+ si = _ffi.new("STARTUPINFO *")
if startup_info is not None:
si.dwFlags = startup_info.dwFlags
si.wShowWindow = startup_info.wShowWindow
+ # CPython: these three handles are expected to be _handle objects
if startup_info.hStdInput:
- si.hStdInput = int(startup_info.hStdInput)
+ si.hStdInput = startup_info.hStdInput.c_handle
if startup_info.hStdOutput:
- si.hStdOutput = int(startup_info.hStdOutput)
+ si.hStdOutput = startup_info.hStdOutput.c_handle
if startup_info.hStdError:
- si.hStdError = int(startup_info.hStdError)
+ si.hStdError = startup_info.hStdError.c_handle
- pi = _PROCESS_INFORMATION()
+ pi = _ffi.new("PROCESS_INFORMATION *")
if env is not None:
envbuf = ""
@@ -156,47 +110,55 @@
envbuf += "%s=%s\0" % (k, v)
envbuf += '\0'
else:
- envbuf = None
+ envbuf = _ffi.NULL
- res = _CreateProcess(name, command_line, None, None, inherit, flags, envbuf,
- start_dir, _byref(si), _byref(pi))
+ res = _kernel32.CreateProcessA(_z(name), _z(command_line), _ffi.NULL,
+ _ffi.NULL, inherit, flags, envbuf,
+ _z(start_dir), si, pi)
if not res:
raise _WinError()
- return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessID, pi.dwThreadID
+ return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessId, pi.dwThreadId
def WaitForSingleObject(handle, milliseconds):
- res = _WaitForSingleObject(int(handle), milliseconds)
-
+ # CPython: the first argument is expected to be an integer.
+ res = _kernel32.WaitForSingleObject(_ffi.cast("HANDLE", handle),
+ milliseconds)
if res < 0:
raise _WinError()
return res
def GetExitCodeProcess(handle):
- code = _c_int()
+ # CPython: the first argument is expected to be an integer.
+ code = _ffi.new("DWORD[1]")
- res = _GetExitCodeProcess(int(handle), _byref(code))
+ res = _kernel32.GetExitCodeProcess(_ffi.cast("HANDLE", handle), code)
if not res:
raise _WinError()
- return code.value
+ return code[0]
def TerminateProcess(handle, exitcode):
- res = _TerminateProcess(int(handle), exitcode)
+ # CPython: the first argument is expected to be an integer.
+ # The second argument is silently wrapped in a UINT.
+ res = _kernel32.TerminateProcess(_ffi.cast("HANDLE", handle),
+ _ffi.cast("UINT", exitcode))
if not res:
raise _WinError()
def GetStdHandle(stdhandle):
- res = _GetStdHandle(stdhandle)
+ stdhandle = _ffi.cast("DWORD", stdhandle)
+ res = _kernel32.GetStdHandle(stdhandle)
if not res:
return None
else:
- return res
+ # note: returns integer, not handle object
+ return int(_ffi.cast("intptr_t", res))
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.5.2
+Version: 1.7.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.5.2"
-__version_info__ = (1, 5, 2)
+__version__ = "1.7.0"
+__version_info__ = (1, 7, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -57,6 +57,12 @@
# define _CFFI_UNUSED_FN /* nothing */
#endif
+#ifdef __cplusplus
+# ifndef _Bool
+# define _Bool bool /* semi-hackish: C++ has no _Bool; bool is builtin */
+# endif
+#endif
+
/********** CPython-specific section **********/
#ifndef PYPY_VERSION
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.5.2"
+ "\ncompiled with cffi version: 1.7.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py
--- a/lib_pypy/cffi/_pycparser/__init__.py
+++ b/lib_pypy/cffi/_pycparser/__init__.py
@@ -10,7 +10,6 @@
__all__ = ['c_lexer', 'c_parser', 'c_ast']
__version__ = '2.14'
-from subprocess import Popen, PIPE
from .c_parser import CParser
@@ -28,6 +27,7 @@
When successful, returns the preprocessed file's contents.
Errors from cpp will be printed out.
"""
+ from subprocess import Popen, PIPE
path_list = [cpp_path]
if isinstance(cpp_args, list):
path_list += cpp_args
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -299,6 +299,23 @@
"""
return self._backend.string(cdata, maxlen)
+ def unpack(self, cdata, length):
+ """Unpack an array of C data of the given length,
+ returning a Python string/unicode/list.
+
+ If 'cdata' is a pointer to 'char', returns a byte string.
+ It does not stop at the first null. This is equivalent to:
+ ffi.buffer(cdata, length)[:]
+
+ If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
+ 'length' is measured in wchar_t's; it is not the size in bytes.
+
+ If 'cdata' is a pointer to anything else, returns a list of
+ 'length' items. This is a faster equivalent to:
+ [cdata[i] for i in range(length)]
+ """
+ return self._backend.unpack(cdata, length)
+
def buffer(self, cdata, size=-1):
"""Return a read-write buffer object that references the raw C data
pointed to by the given 'cdata'. The 'cdata' must be a pointer or
@@ -315,8 +332,8 @@
def from_buffer(self, python_buffer):
"""Return a that points to the data of the
given Python object, which must support the buffer interface.
- Note that this is not meant to be used on the built-in types str,
- unicode, or bytearray (you can build 'char[]' arrays explicitly)
+ Note that this is not meant to be used on the built-in types
+ str or unicode (you can build 'char[]' arrays explicitly)
but only on objects containing large quantities of raw data
in some other format, like 'array.array' or numpy arrays.
"""
@@ -380,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
@@ -721,6 +725,26 @@
raise ValueError("ffi.def_extern() is only available on API-mode FFI "
"objects")
+ def list_types(self):
+ """Returns the user type names known to this FFI instance.
+ This returns a tuple containing three lists of names:
+ (typedef_names, names_of_structs, names_of_unions)
+ """
+ typedefs = []
+ structs = []
+ unions = []
+ for key in self._parser._declarations:
+ if key.startswith('typedef '):
+ typedefs.append(key[8:])
+ elif key.startswith('struct '):
+ structs.append(key[7:])
+ elif key.startswith('union '):
+ unions.append(key[6:])
+ typedefs.sort()
+ structs.sort()
+ unions.sort()
+ return (typedefs, structs, unions)
+
def _load_backend_lib(backend, name, flags):
if name is None:
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -205,9 +205,7 @@
def __nonzero__(self):
return bool(self._address)
-
- def __bool__(self):
- return bool(self._address)
+ __bool__ = __nonzero__
@classmethod
def _to_ctypes(cls, value):
@@ -460,6 +458,12 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
+ __bool__ = __nonzero__
if kind == 'float':
@staticmethod
@@ -993,6 +997,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py
--- a/lib_pypy/cffi/commontypes.py
+++ b/lib_pypy/cffi/commontypes.py
@@ -35,8 +35,11 @@
"you call ffi.set_unicode()" % (commontype,))
else:
if commontype == cdecl:
- raise api.FFIError("Unsupported type: %r. Please file a bug "
- "if you think it should be." % (commontype,))
+ raise api.FFIError(
+ "Unsupported type: %r. Please look at "
+ "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations "
+ "and file an issue if you think this type should really "
+ "be supported." % (commontype,))
result, quals = parser.parse_type_and_quals(cdecl) # recursive
assert isinstance(result, model.BaseTypeByIdentity)
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,7 +29,8 @@
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
-_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
+_r_extern_python = re.compile(r'\bextern\s*"'
+ r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
@@ -88,6 +89,12 @@
# void __cffi_extern_python_start;
# int foo(int);
# void __cffi_extern_python_stop;
+ #
+ # input: `extern "Python+C" int foo(int);`
+ # output:
+ # void __cffi_extern_python_plus_c_start;
+ # int foo(int);
+ # void __cffi_extern_python_stop;
parts = []
while True:
match = _r_extern_python.search(csource)
@@ -98,7 +105,10 @@
#print ''.join(parts)+csource
#print '=>'
parts.append(csource[:match.start()])
- parts.append('void __cffi_extern_python_start; ')
+ if 'C' in match.group(1):
+ parts.append('void __cffi_extern_python_plus_c_start; ')
+ else:
+ parts.append('void __cffi_extern_python_start; ')
if csource[endpos] == '{':
# grouping variant
closing = csource.find('}', endpos)
@@ -302,7 +312,7 @@
break
#
try:
- self._inside_extern_python = False
+ self._inside_extern_python = '__cffi_extern_python_stop'
for decl in iterator:
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
@@ -376,8 +386,10 @@
tp = self._get_type_pointer(tp, quals)
if self._options.get('dllexport'):
tag = 'dllexport_python '
- elif self._inside_extern_python:
+ elif self._inside_extern_python == '__cffi_extern_python_start':
tag = 'extern_python '
+ elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
+ tag = 'extern_python_plus_c '
else:
tag = 'function '
self._declare(tag + decl.name, tp)
@@ -421,11 +433,9 @@
# hack: `extern "Python"` in the C source is replaced
# with "void __cffi_extern_python_start;" and
# "void __cffi_extern_python_stop;"
- self._inside_extern_python = not self._inside_extern_python
- assert self._inside_extern_python == (
- decl.name == '__cffi_extern_python_start')
+ self._inside_extern_python = decl.name
else:
- if self._inside_extern_python:
+ if self._inside_extern_python !='__cffi_extern_python_stop':
raise api.CDefError(
"cannot declare constants or "
"variables with 'extern \"Python\"'")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -814,7 +814,7 @@
try:
if ftype.is_integer_type() or fbitsize >= 0:
# accept all integers, but complain on float or double
- prnt(" (void)((p->%s) << 1); /* check that '%s.%s' is "
+ prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is "
"an integer */" % (fname, cname, fname))
continue
# only accept exactly the type declared, except that '[]'
@@ -991,7 +991,7 @@
prnt('static int %s(unsigned long long *o)' % funcname)
prnt('{')
prnt(' int n = (%s) <= 0;' % (name,))
- prnt(' *o = (unsigned long long)((%s) << 0);'
+ prnt(' *o = (unsigned long long)((%s) | 0);'
' /* check that %s is an integer */' % (name, name))
if check_value is not None:
if check_value > 0:
@@ -1145,11 +1145,11 @@
def _generate_cpy_extern_python_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
self._do_collect_type(tp)
+ _generate_cpy_dllexport_python_collecttype = \
+ _generate_cpy_extern_python_plus_c_collecttype = \
+ _generate_cpy_extern_python_collecttype
- def _generate_cpy_dllexport_python_collecttype(self, tp, name):
- self._generate_cpy_extern_python_collecttype(tp, name)
-
- def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False):
+ def _extern_python_decl(self, tp, name, tag_and_space):
prnt = self._prnt
if isinstance(tp.result, model.VoidType):
size_of_result = '0'
@@ -1184,11 +1184,7 @@
size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
tp.result.get_c_name(''), size_of_a,
tp.result.get_c_name(''), size_of_a)
- if dllexport:
- tag = 'CFFI_DLLEXPORT'
- else:
- tag = 'static'
- prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments)))
+ prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
prnt('{')
prnt(' char a[%s];' % size_of_a)
prnt(' char *p = a;')
@@ -1206,8 +1202,14 @@
prnt()
self._num_externpy += 1
+ def _generate_cpy_extern_python_decl(self, tp, name):
+ self._extern_python_decl(tp, name, 'static ')
+
def _generate_cpy_dllexport_python_decl(self, tp, name):
- self._generate_cpy_extern_python_decl(tp, name, dllexport=True)
+ self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
+
+ def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
+ self._extern_python_decl(tp, name, '')
def _generate_cpy_extern_python_ctx(self, tp, name):
if self.target_is_python:
@@ -1220,8 +1222,9 @@
self._lsts["global"].append(
GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
- def _generate_cpy_dllexport_python_ctx(self, tp, name):
- self._generate_cpy_extern_python_ctx(tp, name)
+ _generate_cpy_dllexport_python_ctx = \
+ _generate_cpy_extern_python_plus_c_ctx = \
+ _generate_cpy_extern_python_ctx
def _string_literal(self, s):
def _char_repr(c):
@@ -1231,7 +1234,7 @@
if c == '\n': return '\\n'
return '\\%03o' % ord(c)
lines = []
- for line in s.splitlines(True):
+ for line in s.splitlines(True) or ['']:
lines.append('"%s"' % ''.join([_char_repr(c) for c in line]))
return ' \\\n'.join(lines)
@@ -1247,7 +1250,7 @@
def _emit_bytecode_UnknownIntegerType(self, tp, index):
s = ('_cffi_prim_int(sizeof(%s), (\n'
- ' ((%s)-1) << 0 /* check that %s is an integer type */\n'
+ ' ((%s)-1) | 0 /* check that %s is an integer type */\n'
' ) <= 0)' % (tp.name, tp.name, tp.name))
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
@@ -1319,7 +1322,9 @@
s = s.encode('ascii')
super(NativeIO, self).write(s)
-def _make_c_or_py_source(ffi, module_name, preamble, target_file):
+def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose):
+ if verbose:
+ print("generating %s" % (target_file,))
recompiler = Recompiler(ffi, module_name,
target_is_python=(preamble is None))
recompiler.collect_type_table()
@@ -1331,6 +1336,8 @@
with open(target_file, 'r') as f1:
if f1.read(len(output) + 1) != output:
raise IOError
+ if verbose:
+ print("(already up-to-date)")
return False # already up-to-date
except IOError:
tmp_file = '%s.~%d' % (target_file, os.getpid())
@@ -1343,12 +1350,14 @@
os.rename(tmp_file, target_file)
return True
-def make_c_source(ffi, module_name, preamble, target_c_file):
+def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False):
assert preamble is not None
- return _make_c_or_py_source(ffi, module_name, preamble, target_c_file)
+ return _make_c_or_py_source(ffi, module_name, preamble, target_c_file,
+ verbose)
-def make_py_source(ffi, module_name, target_py_file):
- return _make_c_or_py_source(ffi, module_name, None, target_py_file)
+def make_py_source(ffi, module_name, target_py_file, verbose=False):
+ return _make_c_or_py_source(ffi, module_name, None, target_py_file,
+ verbose)
def _modname_to_file(outputdir, modname, extension):
parts = modname.split('.')
@@ -1438,7 +1447,8 @@
target = '*'
#
ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds)
- updated = make_c_source(ffi, module_name, preamble, c_file)
+ updated = make_c_source(ffi, module_name, preamble, c_file,
+ verbose=compiler_verbose)
if call_c_compiler:
patchlist = []
cwd = os.getcwd()
@@ -1458,7 +1468,8 @@
else:
if c_file is None:
c_file, _ = _modname_to_file(tmpdir, module_name, '.py')
- updated = make_py_source(ffi, module_name, c_file)
+ updated = make_py_source(ffi, module_name, c_file,
+ verbose=compiler_verbose)
if call_c_compiler:
return c_file
else:
@@ -1484,4 +1495,7 @@
def typeof_disabled(*args, **kwds):
raise NotImplementedError
ffi._typeof = typeof_disabled
+ for name in dir(ffi):
+ if not name.startswith('_') and not hasattr(module.ffi, name):
+ setattr(ffi, name, NotImplemented)
return module.lib
diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty
new file mode 100644
--- /dev/null
+++ b/lib_pypy/ctypes_config_cache/.empty
@@ -0,0 +1,1 @@
+dummy file to allow old buildbot configuration to run
diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py
deleted file mode 100644
diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/dumpcache.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import sys, os
-from ctypes_configure import dumpcache
-
-def dumpcache2(basename, config):
- size = 32 if sys.maxint <= 2**32 else 64
- filename = '_%s_%s_.py' % (basename, size)
- dumpcache.dumpcache(__file__, filename, config)
- #
- filename = os.path.join(os.path.dirname(__file__),
- '_%s_cache.py' % (basename,))
- g = open(filename, 'w')
- print >> g, '''\
-import sys
-_size = 32 if sys.maxint <= 2**32 else 64
-# XXX relative import, should be removed together with
-# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib
-_mod = __import__("_%s_%%s_" %% (_size,),
- globals(), locals(), ["*"])
-globals().update(_mod.__dict__)\
-''' % (basename,)
- g.close()
diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/locale.ctc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-'ctypes_configure' source for _locale.py.
-Run this to rebuild _locale_cache.py.
-"""
-
-from ctypes_configure.configure import (configure, ExternalCompilationInfo,
- ConstantInteger, DefinedConstantInteger, SimpleType, check_eci)
-import dumpcache
-
-# ____________________________________________________________
-
-_CONSTANTS = [
- 'LC_CTYPE',
- 'LC_TIME',
- 'LC_COLLATE',
- 'LC_MONETARY',
- 'LC_MESSAGES',
- 'LC_NUMERIC',
- 'LC_ALL',
- 'CHAR_MAX',
-]
-
-class LocaleConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['limits.h',
- 'locale.h'])
-for key in _CONSTANTS:
- setattr(LocaleConfigure, key, DefinedConstantInteger(key))
-
-config = configure(LocaleConfigure, noerr=True)
-for key, value in config.items():
- if value is None:
- del config[key]
- _CONSTANTS.remove(key)
-
-# ____________________________________________________________
-
-eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h'])
-HAS_LANGINFO = check_eci(eci)
-
-if HAS_LANGINFO:
- # list of all possible names
- langinfo_names = [
- "RADIXCHAR", "THOUSEP", "CRNCYSTR",
- "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR",
- "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT",
- "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT",
- ]
- for i in range(1, 8):
- langinfo_names.append("DAY_%d" % i)
- langinfo_names.append("ABDAY_%d" % i)
- for i in range(1, 13):
- langinfo_names.append("MON_%d" % i)
- langinfo_names.append("ABMON_%d" % i)
-
- class LanginfoConfigure:
- _compilation_info_ = eci
- nl_item = SimpleType('nl_item')
- for key in langinfo_names:
- setattr(LanginfoConfigure, key, DefinedConstantInteger(key))
-
- langinfo_config = configure(LanginfoConfigure)
- for key, value in langinfo_config.items():
- if value is None:
- del langinfo_config[key]
- langinfo_names.remove(key)
- config.update(langinfo_config)
- _CONSTANTS += langinfo_names
-
-# ____________________________________________________________
-
-config['ALL_CONSTANTS'] = tuple(_CONSTANTS)
-config['HAS_LANGINFO'] = HAS_LANGINFO
-dumpcache.dumpcache2('locale', config)
diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py
deleted file mode 100755
--- a/lib_pypy/ctypes_config_cache/rebuild.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env python
-# Run this script to rebuild all caches from the *.ctc.py files.
-
-import os, sys
-
-sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
-
-import py
-
-_dirpath = os.path.dirname(__file__) or os.curdir
-
-from rpython.tool.ansi_print import AnsiLogger
-log = AnsiLogger("ctypes_config_cache")
-
-
-def rebuild_one(name):
- filename = os.path.join(_dirpath, name)
- d = {'__file__': filename}
- path = sys.path[:]
- try:
- sys.path.insert(0, _dirpath)
- execfile(filename, d)
- finally:
- sys.path[:] = path
-
-def try_rebuild():
- size = 32 if sys.maxint <= 2**32 else 64
- # remove the files '_*_size_.py'
- left = {}
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_%s_.py' % size) or
- p.endswith('_%s_.pyc' % size)):
- os.unlink(os.path.join(_dirpath, p))
- elif p.startswith('_') and (p.endswith('_.py') or
- p.endswith('_.pyc')):
- for i in range(2, len(p)-4):
- left[p[:i]] = True
- # remove the files '_*_cache.py' if there is no '_*_*_.py' left around
- for p in os.listdir(_dirpath):
- if p.startswith('_') and (p.endswith('_cache.py') or
- p.endswith('_cache.pyc')):
- if p[:-9] not in left:
- os.unlink(os.path.join(_dirpath, p))
- #
- for p in os.listdir(_dirpath):
- if p.endswith('.ctc.py'):
- try:
- rebuild_one(p)
- except Exception, e:
- log.ERROR("Running %s:\n %s: %s" % (
- os.path.join(_dirpath, p),
- e.__class__.__name__, e))
-
-
-if __name__ == '__main__':
- try_rebuild()
diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/resource.ctc.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-'ctypes_configure' source for resource.py.
-Run this to rebuild _resource_cache.py.
-"""
-
-
-from ctypes import sizeof
-import dumpcache
-from ctypes_configure.configure import (configure,
- ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
- SimpleType)
-
-
-_CONSTANTS = (
- 'RLIM_INFINITY',
- 'RLIM_NLIMITS',
-)
-_OPTIONAL_CONSTANTS = (
- 'RLIMIT_CPU',
- 'RLIMIT_FSIZE',
- 'RLIMIT_DATA',
- 'RLIMIT_STACK',
- 'RLIMIT_CORE',
- 'RLIMIT_RSS',
- 'RLIMIT_NPROC',
- 'RLIMIT_NOFILE',
- 'RLIMIT_OFILE',
- 'RLIMIT_MEMLOCK',
- 'RLIMIT_AS',
- 'RLIMIT_LOCKS',
- 'RLIMIT_SIGPENDING',
- 'RLIMIT_MSGQUEUE',
- 'RLIMIT_NICE',
- 'RLIMIT_RTPRIO',
- 'RLIMIT_VMEM',
-
- 'RUSAGE_BOTH',
- 'RUSAGE_SELF',
- 'RUSAGE_CHILDREN',
-)
-
-# Setup our configure
-class ResourceConfigure:
- _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h'])
- rlim_t = SimpleType('rlim_t')
-for key in _CONSTANTS:
- setattr(ResourceConfigure, key, ConstantInteger(key))
-for key in _OPTIONAL_CONSTANTS:
- setattr(ResourceConfigure, key, DefinedConstantInteger(key))
-
-# Configure constants and types
-config = configure(ResourceConfigure)
-config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1
-optional_constants = []
-for key in _OPTIONAL_CONSTANTS:
- if config[key] is not None:
- optional_constants.append(key)
- else:
- del config[key]
-
-config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
-dumpcache.dumpcache2('resource', config)
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -839,7 +839,7 @@
month = self._month
if day is None:
day = self._day
- return date(year, month, day)
+ return date.__new__(type(self), year, month, day)
# Comparisons of date objects with other.
@@ -1356,7 +1356,8 @@
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
- return time(hour, minute, second, microsecond, tzinfo)
+ return time.__new__(type(self),
+ hour, minute, second, microsecond, tzinfo)
def __nonzero__(self):
if self.second or self.microsecond:
@@ -1566,8 +1567,9 @@
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
- return datetime(year, month, day, hour, minute, second, microsecond,
- tzinfo)
+ return datetime.__new__(type(self),
+ year, month, day, hour, minute, second,
+ microsecond, tzinfo)
def astimezone(self, tz):
if not isinstance(tz, tzinfo):
diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info
--- a/lib_pypy/greenlet.egg-info
+++ b/lib_pypy/greenlet.egg-info
@@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: greenlet
-Version: 0.4.9
+Version: 0.4.10
Summary: Lightweight in-process concurrent programming
Home-page: https://github.com/python-greenlet/greenlet
Author: Ralf Schmitt (for CPython), PyPy team
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -1,7 +1,7 @@
import sys
import _continuation
-__version__ = "0.4.9"
+__version__ = "0.4.10"
# ____________________________________________________________
# Exceptions
diff --git a/lib_pypy/msvcrt.py b/lib_pypy/msvcrt.py
--- a/lib_pypy/msvcrt.py
+++ b/lib_pypy/msvcrt.py
@@ -7,26 +7,39 @@
# XXX incomplete: implemented only functions needed by subprocess.py
# PAC: 2010/08 added MS locking for Whoosh
-import ctypes
+# 07/2016: rewrote in CFFI
+
+import sys
+if sys.platform != 'win32':
+ raise ImportError("The 'msvcrt' module is only available on Windows")
+
+import _rawffi
+from _pypy_winbase_cffi import ffi as _ffi
+_lib = _ffi.dlopen(_rawffi.get_libc().name)
+
import errno
-from ctypes_support import standard_c_lib as _c
-from ctypes_support import get_errno
-
-try:
- open_osfhandle = _c._open_osfhandle
-except AttributeError: # we are not on windows
- raise ImportError
try: from __pypy__ import builtinify, validate_fd
except ImportError: builtinify = validate_fd = lambda f: f
-open_osfhandle.argtypes = [ctypes.c_int, ctypes.c_int]
-open_osfhandle.restype = ctypes.c_int
+def _ioerr():
+ e = _ffi.errno
+ raise IOError(e, errno.errorcode[e])
-_get_osfhandle = _c._get_osfhandle
-_get_osfhandle.argtypes = [ctypes.c_int]
-_get_osfhandle.restype = ctypes.c_int
+
+ at builtinify
+def open_osfhandle(fd, flags):
+ """"open_osfhandle(handle, flags) -> file descriptor
+
+ Create a C runtime file descriptor from the file handle handle. The
+ flags parameter should be a bitwise OR of os.O_APPEND, os.O_RDONLY,
+ and os.O_TEXT. The returned file descriptor may be used as a parameter
+ to os.fdopen() to create a file object."""
+ fd = _lib._open_osfhandle(fd, flags)
+ if fd == -1:
+ _ioerr()
+ return fd
@builtinify
def get_osfhandle(fd):
@@ -38,62 +51,74 @@
validate_fd(fd)
except OSError as e:
raise IOError(*e.args)
- return _get_osfhandle(fd)
+ result = _lib._get_osfhandle(fd)
+ if result == -1:
+ _ioerr()
+ return result
-setmode = _c._setmode
-setmode.argtypes = [ctypes.c_int, ctypes.c_int]
-setmode.restype = ctypes.c_int
+ at builtinify
+def setmode(fd, flags):
+ """setmode(fd, mode) -> Previous mode
+
+ Set the line-end translation mode for the file descriptor fd. To set
+ it to text mode, flags should be os.O_TEXT; for binary, it should be
+ os.O_BINARY."""
+ flags = _lib._setmode(fd, flags)
+ if flags == -1:
+ _ioerr()
+ return flags
LK_UNLCK, LK_LOCK, LK_NBLCK, LK_RLCK, LK_NBRLCK = range(5)
-_locking = _c._locking
-_locking.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
From pypy.commits at gmail.com Fri Aug 5 05:17:41 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Fri, 05 Aug 2016 02:17:41 -0700 (PDT)
Subject: [pypy-commit] pypy resource_warning: switch sockets to the
register_finalizer approach
Message-ID: <57a459b5.8aacc20a.74e1d.e6f2@mx.google.com>
Author: Carl Friedrich Bolz
Branch: resource_warning
Changeset: r86026:63fff351448e
Date: 2016-08-05 11:17 +0200
http://bitbucket.org/pypy/pypy/changeset/63fff351448e/
Log: switch sockets to the register_finalizer approach
this is nice because it means that if resource_warnings are
disabled, no overhead is imposed at all
diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py
--- a/pypy/module/_socket/interp_socket.py
+++ b/pypy/module/_socket/interp_socket.py
@@ -159,16 +159,11 @@
register_socket(space, sock)
if self.space.sys.track_resources:
self.w_tb = self.space.format_traceback()
+ self.register_finalizer(space)
- def __del__(self):
+ def _finalize_(self):
is_open = self.sock.fd >= 0
if is_open and self.space.sys.track_resources:
- self.enqueue_for_destruction(self.space, W_Socket.destructor,
- '__del__ method of ')
-
- def destructor(self):
- assert isinstance(self, W_Socket)
- if self.space.sys.track_resources:
w_repr = self.space.repr(self)
str_repr = self.space.str_w(w_repr)
w_msg = self.space.wrap("WARNING: unclosed " + str_repr)
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -455,11 +455,11 @@
File ".*", line .*, in fn
""", msg)
#
- # check with track_resources enabled in the destructor BUT with a
- # file which was created when track_resources was disabled
+ # track_resources is enabled after the construction of the socket. in
+ # this case, the socket is not registered for finalization at all, so
+ # we don't see a message
msg = fn(False, True)
- assert self.regex_search("WARNING: unclosed ", msg)
- assert "Created at" not in msg
+ assert msg == ''
def test_socket_close_error(self):
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -428,4 +428,5 @@
FakeObjSpace.sys.filesystemencoding = 'foobar'
FakeObjSpace.sys.defaultencoding = 'ascii'
FakeObjSpace.sys.dlopenflags = 123
+FakeObjSpace.sys.track_resources = False
FakeObjSpace.builtin = FakeModule()
From pypy.commits at gmail.com Fri Aug 5 05:18:04 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Fri, 05 Aug 2016 02:18:04 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: jitlog encode fail arguments
(how could I forget that :)
Message-ID: <57a459cc.262ec20a.1424d.e209@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86027:912c2e29f742
Date: 2016-08-05 11:17 +0200
http://bitbucket.org/pypy/pypy/changeset/912c2e29f742/
Log: jitlog encode fail arguments (how could I forget that :) ppc
	misaligned_is_fine = True
diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py
--- a/rpython/rlib/rawstorage.py
+++ b/rpython/rlib/rawstorage.py
@@ -48,7 +48,8 @@
try:
cpuname = detect_cpu.autodetect()
misaligned_is_fine = cpuname.startswith('x86') or \
- cpuname.startswith('s390x')
+ cpuname.startswith('s390x') or \
+ cpuname.startswith('ppc')
del cpuname
except detect_cpu.ProcessorAutodetectError:
misaligned_is_fine = False
diff --git a/rpython/rlib/rjitlog/rjitlog.py b/rpython/rlib/rjitlog/rjitlog.py
--- a/rpython/rlib/rjitlog/rjitlog.py
+++ b/rpython/rlib/rjitlog/rjitlog.py
@@ -509,7 +509,9 @@
""" an operation is written as follows:
\
\
- ,,...,,
+ ,,..., \
+
+ ,...
The marker indicates if the last argument is
a descr or a normal argument.
"""
@@ -518,16 +520,21 @@
le_opnum = encode_le_16bit(op.getopnum())
str_res = self.var_to_str(op)
line = ','.join([str_res] + str_args)
+ failargslist = op.getfailargs()
+ failargs = ''
+ if failargslist:
+ failargs = ','.join([self.var_to_str(farg) for farg in failargslist])
+ #
if descr:
descr_str = descr.repr_of_descr()
line = line + ',' + descr_str
string = encode_str(line)
descr_number = compute_unique_id(descr)
le_descr_number = encode_le_addr(descr_number)
- return MARK_RESOP_DESCR, le_opnum + string + le_descr_number
+ return MARK_RESOP_DESCR, le_opnum + string + le_descr_number + encode_str(failargs)
else:
string = encode_str(line)
- return MARK_RESOP, le_opnum + string
+ return MARK_RESOP, le_opnum + string + encode_str(failargs)
def write_core_dump(self, operations, i, op, ops_offset):
@@ -579,6 +586,8 @@
return ''.join(dump)
def var_to_str(self, arg):
+ if arg is None:
+ return '-'
try:
mv = self.memo[arg]
except KeyError:
From pypy.commits at gmail.com Fri Aug 5 08:18:51 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Fri, 05 Aug 2016 05:18:51 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: typos in draft
Message-ID: <57a4842b.031dc20a.af26a.3786@mx.google.com>
Author: Richard Plangger
Branch: extradoc
Changeset: r5662:34250d9373c4
Date: 2016-08-05 14:18 +0200
http://bitbucket.org/pypy/extradoc/changeset/34250d9373c4/
Log: typos in draft
diff --git a/blog/draft/new-jit-log.rst b/blog/draft/new-jit-log.rst
--- a/blog/draft/new-jit-log.rst
+++ b/blog/draft/new-jit-log.rst
@@ -8,14 +8,14 @@
Both VMProf and JV share some common goals. That is the reason why they are now both packaged together.
www.vmprof.com also got updated with various bugfixes and changes including an all new interface to JV.
-A advertisment: We constantly improve tooling and libraries around the Python/PyPy eco system.
+An advertisement: We constantly improve tooling and libraries around the Python/PyPy ecosystem.
Here are a three examples you might also want to use in your Python projects:
* VMProf - A statistical CPU profiler
* RevDB - A reverse debugger for Python
* CFFI - Foreign Function Interface that avoids CPyExt
-A brand new JitViewer
+A "brand new" JitViewer
---------------------
The old logging format was a hard to maintain plain text logging facility. Frequent changes often broke internal tools, most notably JV. Additionaly the logging output of a long running program took a lot of disk space.
From pypy.commits at gmail.com Fri Aug 5 09:38:04 2016
From: pypy.commits at gmail.com (fijal)
Date: Fri, 05 Aug 2016 06:38:04 -0700 (PDT)
Subject: [pypy-commit] pypy default: improve the test, still failing
Message-ID: <57a496bc.c19d1c0a.83f5f.0a3a@mx.google.com>
Author: fijal
Branch:
Changeset: r86028:cb1aff2e4d19
Date: 2016-08-05 15:37 +0200
http://bitbucket.org/pypy/pypy/changeset/cb1aff2e4d19/
Log: improve the test, still failing
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -2,7 +2,8 @@
from rpython.rlib import jit
from rpython.rtyper.annlowlevel import llhelper
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib.rvmprof import cintf
+from rpython.rlib.rvmprof import cintf, vmprof_execute_code, register_code,\
+ register_code_object_class, _get_vmprof
from rpython.jit.backend.x86.arch import WORD
from rpython.jit.codewriter.policy import JitPolicy
@@ -14,6 +15,7 @@
def helper():
stack = cintf.vmprof_tl_stack.getraw()
+ print stack
if stack:
# not during tracing
visited.append(stack.c_value)
@@ -22,15 +24,34 @@
llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
- driver = jit.JitDriver(greens=[], reds='auto')
+ driver = jit.JitDriver(greens=['code'], reds='auto')
- def f(n):
+ class CodeObj(object):
+ pass
+
+ def get_code_fn(code, arg):
+ return code
+
+ def get_name(code):
+ return "foo"
+
+ register_code_object_class(CodeObj, get_name)
+
+ @vmprof_execute_code("main", get_code_fn)
+ def f(code, n):
i = 0
while i < n:
- driver.jit_merge_point()
+ driver.jit_merge_point(code=code)
i += 1
llfn()
+ def main(n):
+ cintf.vmprof_tl_stack.setraw(null) # make it empty
+ vmprof = _get_vmprof()
+ code = CodeObj()
+ register_code(code, get_name)
+ return f(code, n)
+
class Hooks(jit.JitHookInterface):
def after_compile(self, debug_info):
self.raw_start = debug_info.asminfo.rawstart
@@ -38,12 +59,12 @@
hooks = Hooks()
null = lltype.nullptr(cintf.VMPROFSTACK)
- cintf.vmprof_tl_stack.setraw(null) # make it empty
- self.meta_interp(f, [10], policy=JitPolicy(hooks))
- v = set(visited)
- assert 0 in v
- v.remove(0)
- assert len(v) == 1
- assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
- assert cintf.vmprof_tl_stack.getraw() == null
+ self.meta_interp(main, [10], policy=JitPolicy(hooks))
+ print visited
+ #v = set(visited)
+ #assert 0 in v
+ #v.remove(0)
+ #assert len(v) == 1
+ #assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
+ #assert cintf.vmprof_tl_stack.getraw() == null
# ^^^ make sure we didn't leave anything dangling
diff --git a/rpython/jit/backend/x86/test/test_rvmprof.py b/rpython/jit/backend/x86/test/test_rvmprof.py
--- a/rpython/jit/backend/x86/test/test_rvmprof.py
+++ b/rpython/jit/backend/x86/test/test_rvmprof.py
@@ -3,5 +3,5 @@
from rpython.jit.backend.test.test_rvmprof import BaseRVMProfTest
from rpython.jit.backend.x86.test.test_basic import Jit386Mixin
-class TestFfiCall(Jit386Mixin, BaseRVMProfTest):
- pass
\ No newline at end of file
+class TestRVMProfCall(Jit386Mixin, BaseRVMProfTest):
+ pass
From pypy.commits at gmail.com Fri Aug 5 09:41:08 2016
From: pypy.commits at gmail.com (fijal)
Date: Fri, 05 Aug 2016 06:41:08 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: unskip the broken test
on a branch
Message-ID: <57a49774.47cbc20a.13c7.4ac4@mx.google.com>
Author: fijal
Branch: improve-vmprof-testing
Changeset: r86029:a9fa0458e104
Date: 2016-08-05 15:40 +0200
http://bitbucket.org/pypy/pypy/changeset/a9fa0458e104/
Log: unskip the broken test on a branch
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -9,8 +9,8 @@
class BaseRVMProfTest(object):
def test_one(self):
- py.test.skip("needs thread-locals in the JIT, which is only available "
- "after translation")
+# py.test.skip("needs thread-locals in the JIT, which is only available "
+# "after translation")
visited = []
def helper():
From pypy.commits at gmail.com Fri Aug 5 10:04:37 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 05 Aug 2016 07:04:37 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: We need the vmprof stack
updates to occur all untranslated, because the
Message-ID: <57a49cf5.c2a5c20a.adc42.5696@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86030:5af59f30c570
Date: 2016-08-05 16:06 +0200
http://bitbucket.org/pypy/pypy/changeset/5af59f30c570/
Log: We need the vmprof stack updates to occur all untranslated, because
the test is going to be checking that the metainterp/* updates it
correctly, and metainterp/* is not translated here
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -37,7 +37,8 @@
register_code_object_class(CodeObj, get_name)
- @vmprof_execute_code("main", get_code_fn)
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
def f(code, n):
i = 0
while i < n:
@@ -46,7 +47,6 @@
llfn()
def main(n):
- cintf.vmprof_tl_stack.setraw(null) # make it empty
vmprof = _get_vmprof()
code = CodeObj()
register_code(code, get_name)
@@ -59,6 +59,7 @@
hooks = Hooks()
null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
self.meta_interp(main, [10], policy=JitPolicy(hooks))
print visited
#v = set(visited)
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -4,7 +4,7 @@
from rpython.rlib.rvmprof import cintf
from rpython.rtyper.annlowlevel import cast_instance_to_gcref
from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
-from rpython.rtyper.lltypesystem import rffi, llmemory
+from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.rweaklist import RWeakListMixin
@@ -140,7 +140,8 @@
if self.cintf.vmprof_register_virtual_function(name, uid, 500000) < 0:
raise VMProfError("vmprof buffers full! disk full or too slow")
-def vmprof_execute_code(name, get_code_fn, result_class=None):
+def vmprof_execute_code(name, get_code_fn, result_class=None,
+ _hack_update_stack_untranslated=False):
"""Decorator to be used on the function that interprets a code object.
'name' must be a unique name.
@@ -150,6 +151,18 @@
'result_class' is ignored (backward compatibility).
"""
+ if _hack_update_stack_untranslated:
+ from rpython.rtyper.annlowlevel import llhelper
+ enter_code = llhelper(lltype.Ptr(
+ lltype.FuncType([lltype.Signed], cintf.PVMPROFSTACK)),
+ cintf.enter_code)
+ leave_code = llhelper(lltype.Ptr(
+ lltype.FuncType([cintf.PVMPROFSTACK], lltype.Void)),
+ cintf.leave_code)
+ else:
+ enter_code = cintf.enter_code
+ leave_code = cintf.leave_code
+
def decorate(func):
try:
_get_vmprof()
@@ -161,11 +174,11 @@
# JIT cannot see through it.
if not jit.we_are_jitted():
unique_id = get_code_fn(*args)._vmprof_unique_id
- x = cintf.enter_code(unique_id)
+ x = enter_code(unique_id)
try:
return func(*args)
finally:
- cintf.leave_code(x)
+ leave_code(x)
else:
return func(*args)
From pypy.commits at gmail.com Fri Aug 5 10:53:48 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 05 Aug 2016 07:53:48 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Stop using setjmp/longjmp,
and instead kill the subprocess. (Previously
Message-ID: <57a4a87c.6814c30a.71dcd.6fa4@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86031:cf7c9337178d
Date: 2016-08-05 16:54 +0200
http://bitbucket.org/pypy/pypy/changeset/cf7c9337178d/
Log: Stop using setjmp/longjmp, and instead kill the subprocess.
(Previously the subprocess was in a half-zombie state.)
diff --git a/rpython/translator/revdb/message.py b/rpython/translator/revdb/message.py
--- a/rpython/translator/revdb/message.py
+++ b/rpython/translator/revdb/message.py
@@ -48,6 +48,10 @@
# if breakpoint_mode=='i': ignored, never sent
ANSWER_BREAKPOINT = -24
+# sent after an Attempted to do I/O or access raw memory, as the last message
+ANSWER_ATTEMPT_IO = -25
+
+
# print one line of a file to the console, for CMD_PRINT
# Message(ANSWER_LINECACHE, linenum, extra=filename)
ANSWER_LINECACHE = 19
diff --git a/rpython/translator/revdb/process.py b/rpython/translator/revdb/process.py
--- a/rpython/translator/revdb/process.py
+++ b/rpython/translator/revdb/process.py
@@ -57,6 +57,10 @@
return a
+class RecreateSubprocess(Exception):
+ pass
+
+
class ReplayProcess(object):
"""Represent one replaying subprocess.
@@ -207,6 +211,8 @@
pgroup.all_printed_objects_lst.append(uid)
sys.stdout.write('$%d = ' % nid)
sys.stdout.flush()
+ elif msg.cmd == ANSWER_ATTEMPT_IO:
+ raise RecreateSubprocess
else:
print >> sys.stderr, "unexpected %r" % (msg,)
@@ -441,7 +447,8 @@
def _resume(self, from_time):
clone_me = self.paused[from_time]
- self.active.close()
+ if self.active is not None:
+ self.active.close()
self.active = clone_me.clone()
def jump_in_time(self, target_time):
@@ -534,6 +541,12 @@
self.active.send(Message(CMD_ATTACHID, nid, uid, int(watch_env)))
self.active.expect_ready()
+ def recreate_subprocess(self):
+ # recreate a subprocess at the current time
+ time = self.get_current_time()
+ self.active = None
+ self.jump_in_time(time)
+
def print_cmd(self, expression, nids=[]):
"""Print an expression.
"""
@@ -545,7 +558,10 @@
self.active.tainted = True
self.attach_printed_objects(uids, watch_env=False)
self.active.send(Message(CMD_PRINT, extra=expression))
- self.active.print_text_answer(pgroup=self)
+ try:
+ self.active.print_text_answer(pgroup=self)
+ except RecreateSubprocess:
+ self.recreate_subprocess()
def show_backtrace(self, complete=1):
"""Show the backtrace.
@@ -553,14 +569,20 @@
if complete:
self.active.tainted = True
self.active.send(Message(CMD_BACKTRACE, complete))
- self.active.print_text_answer()
+ try:
+ self.active.print_text_answer()
+ except RecreateSubprocess:
+ self.recreate_subprocess()
def show_locals(self):
"""Show the locals.
"""
self.active.tainted = True
self.active.send(Message(CMD_LOCALS))
- self.active.print_text_answer()
+ try:
+ self.active.print_text_answer()
+ except RecreateSubprocess:
+ self.recreate_subprocess()
def edit_breakpoints(self):
return self.all_breakpoints
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -9,7 +9,6 @@
#include
#include
#include
-#include
#include
#include
@@ -38,7 +37,7 @@
#define FID_REGULAR_MODE 'R'
#define FID_SAVED_STATE 'S'
-#define FID_JMPBUF_PROTECTED 'J'
+#define FID_POTENTIAL_IO 'I'
typedef struct {
@@ -566,6 +565,7 @@
#define ANSWER_FORKED (-22)
#define ANSWER_AT_END (-23)
#define ANSWER_BREAKPOINT (-24)
+#define ANSWER_ATTEMPT_IO (-25)
#define RECORD_BKPT_NUM 50
@@ -575,7 +575,6 @@
static const char *rpy_rev_filename;
static uint64_t interactive_break = 1, finalizer_break = -1, uid_break = -1;
static uint64_t total_stop_points;
-static jmp_buf jmp_buf_cancel_execution;
static void (*pending_after_forward)(void);
static RPyString *empty_string;
static uint64_t last_recorded_breakpoint_loc;
@@ -858,13 +857,12 @@
*/
fprintf(stderr, "%s:%d: Attempted to do I/O or access raw memory\n",
file, line);
- if (flag_io_disabled == FID_JMPBUF_PROTECTED) {
- longjmp(jmp_buf_cancel_execution, 1);
- }
- else {
+ if (flag_io_disabled != FID_POTENTIAL_IO) {
fprintf(stderr, "but we are not in a jmpbuf_protected section\n");
exit(1);
}
+ write_answer(ANSWER_ATTEMPT_IO, 0, 0, 0);
+ exit(0);
}
}
@@ -916,23 +914,24 @@
set_revdb_breakpoints();
}
-static void protect_jmpbuf(void)
+static void protect_potential_io(void)
{
- change_flag_io_disabled(FID_SAVED_STATE, FID_JMPBUF_PROTECTED);
+ change_flag_io_disabled(FID_SAVED_STATE, FID_POTENTIAL_IO);
saved_exc[0] = pypy_g_ExcData.ed_exc_type;
saved_exc[1] = pypy_g_ExcData.ed_exc_value;
pypy_g_ExcData.ed_exc_type = NULL;
pypy_g_ExcData.ed_exc_value = NULL;
}
-static void unprotect_jmpbuf(void)
+static void unprotect_potential_io(void)
{
- change_flag_io_disabled(FID_JMPBUF_PROTECTED, FID_SAVED_STATE);
+ change_flag_io_disabled(FID_POTENTIAL_IO, FID_SAVED_STATE);
if (pypy_g_ExcData.ed_exc_type != NULL) {
fprintf(stderr, "Command crashed with %.*s\n",
(int)(pypy_g_ExcData.ed_exc_type->ov_name->rs_chars.length),
pypy_g_ExcData.ed_exc_type->ov_name->rs_chars.items);
- exit(1);
+ write_answer(ANSWER_ATTEMPT_IO, 1, 0, 0);
+ exit(0);
}
pypy_g_ExcData.ed_exc_type = saved_exc[0];
pypy_g_ExcData.ed_exc_value = saved_exc[1];
@@ -942,10 +941,9 @@
rpy_revdb_command_t *cmd,
RPyString *extra)
{
- protect_jmpbuf();
- if (setjmp(jmp_buf_cancel_execution) == 0)
- func(cmd, extra);
- unprotect_jmpbuf();
+ protect_potential_io();
+ func(cmd, extra);
+ unprotect_potential_io();
}
static void check_at_end(uint64_t stop_points)
@@ -1267,10 +1265,9 @@
save_state();
if (rpy_revdb_commands.rp_alloc) {
- protect_jmpbuf();
- if (setjmp(jmp_buf_cancel_execution) == 0)
- rpy_revdb_commands.rp_alloc(uid, new_object);
- unprotect_jmpbuf();
+ protect_potential_io();
+ rpy_revdb_commands.rp_alloc(uid, new_object);
+ unprotect_potential_io();
}
uid_break = *++future_next_id;
restore_state();
diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py
--- a/rpython/translator/revdb/test/test_basic.py
+++ b/rpython/translator/revdb/test/test_basic.py
@@ -395,6 +395,7 @@
def test_io_not_permitted(self):
child = self.replay(stderr=subprocess.PIPE)
child.send(Message(1, extra='oops'))
+ child.expect(ANSWER_ATTEMPT_IO)
child.close()
err = self.subproc.stderr.read()
assert err.endswith(': Attempted to do I/O or access raw memory\n')
From pypy.commits at gmail.com Fri Aug 5 11:19:51 2016
From: pypy.commits at gmail.com (fijal)
Date: Fri, 05 Aug 2016 08:19:51 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: work on test
Message-ID: <57a4ae97.c4431c0a.97016.2fa5@mx.google.com>
Author: fijal
Branch: improve-vmprof-testing
Changeset: r86032:31850c5d6db3
Date: 2016-08-05 17:19 +0200
http://bitbucket.org/pypy/pypy/changeset/31850c5d6db3/
Log: work on test
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -14,22 +14,22 @@
visited = []
def helper():
+ trace = []
stack = cintf.vmprof_tl_stack.getraw()
- print stack
- if stack:
- # not during tracing
- visited.append(stack.c_value)
- else:
- visited.append(0)
+ while stack:
+ trace.append((stack.c_kind, stack.c_value))
+ stack = stack.c_next
+ visited.append(trace)
llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
driver = jit.JitDriver(greens=['code'], reds='auto')
class CodeObj(object):
- pass
+ def __init__(self, name):
+ self.name = name
- def get_code_fn(code, arg):
+ def get_code_fn(codes, code, arg):
return code
def get_name(code):
@@ -39,18 +39,21 @@
@vmprof_execute_code("main", get_code_fn,
_hack_update_stack_untranslated=True)
- def f(code, n):
+ def f(codes, code, n):
i = 0
while i < n:
driver.jit_merge_point(code=code)
+ if code.name == "main":
+ f(codes, codes[1], 5)
i += 1
llfn()
def main(n):
vmprof = _get_vmprof()
- code = CodeObj()
- register_code(code, get_name)
- return f(code, n)
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ return f(codes, codes[0], n)
class Hooks(jit.JitHookInterface):
def after_compile(self, debug_info):
From pypy.commits at gmail.com Fri Aug 5 11:40:41 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 05 Aug 2016 08:40:41 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: More next/bnext tweaks
Message-ID: <57a4b379.81a2c20a.51926.d634@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86033:6b40ed980320
Date: 2016-08-05 17:42 +0200
http://bitbucket.org/pypy/pypy/changeset/6b40ed980320/
Log: More next/bnext tweaks
diff --git a/pypy/interpreter/reverse_debugging.py b/pypy/interpreter/reverse_debugging.py
--- a/pypy/interpreter/reverse_debugging.py
+++ b/pypy/interpreter/reverse_debugging.py
@@ -512,7 +512,11 @@
uid = 0
else:
uid = revdb.get_unique_id(frame)
- revdb.send_answer(revdb.ANSWER_STACKID, uid)
+ if revdb.current_place() == -2:
+ hidden_level = 1 # hide the "<<" events from next/bnext commands
+ else:
+ hidden_level = 0
+ revdb.send_answer(revdb.ANSWER_STACKID, uid, hidden_level)
lambda_stackid = lambda: command_stackid
diff --git a/rpython/translator/revdb/interact.py b/rpython/translator/revdb/interact.py
--- a/rpython/translator/revdb/interact.py
+++ b/rpython/translator/revdb/interact.py
@@ -237,47 +237,61 @@
def command_next(self, argument):
"""Run forward for one step, skipping calls"""
- stack_id = self.pgroup.get_stack_id(is_parent=False)
- with self._stack_id_break(stack_id):
- b = self.move_forward(1)
- while b is not None:
- # if we hit a regular breakpoint, stop
- if any(b.regular_breakpoint_nums()):
- return
- # we hit only calls and returns inside stack_id. If the
- # last one of these is a "return", then we're now back inside
- # stack_id, so stop
- if b.nums[-1] == -2:
- return
- # else, the last one is a "call", so we entered another frame.
- # Continue running until the next call/return event occurs
- # inside stack_id
+ while True:
+ stack_id = self.pgroup.get_stack_id(is_parent=False)
with self._stack_id_break(stack_id):
- b = self.move_forward(self.pgroup.get_max_time() -
- self.pgroup.get_current_time())
- # and then look at that 'b' again (closes the loop)
+ b = self.move_forward(1)
+ while b is not None:
+ # if we hit a regular breakpoint, stop
+ if any(b.regular_breakpoint_nums()):
+ return
+ # we hit only calls and returns inside stack_id. If the
+ # last one of these is a "return", then we're now back inside
+ # stack_id, so stop
+ if b.nums[-1] == -2:
+ break
+ # else, the last one is a "call", so we entered another frame.
+ # Continue running until the next call/return event occurs
+ # inside stack_id
+ with self._stack_id_break(stack_id):
+ b = self.move_forward(self.pgroup.get_max_time() -
+ self.pgroup.get_current_time())
+ # and then look at that 'b' again (closes the loop)
+
+ # we might be at a "<<" position on the same line as before,
+ # which returns a get_hiddenpos_level() value of 1. Continue
+ # until we reach a get_hiddenpos_level() value of 0.
+ if b is None or self.pgroup.get_hiddenpos_level() == 0:
+ break
command_n = command_next
def command_bnext(self, argument):
"""Run backward for one step, skipping calls"""
- stack_id = self.pgroup.get_stack_id(is_parent=False)
- with self._stack_id_break(stack_id):
- b = self.move_backward(1)
- while b is not None:
- # if we hit a regular breakpoint, stop
- if any(b.regular_breakpoint_nums()):
- return
- # we hit only calls and returns inside stack_id. If the
- # first one of these is a "call", then we're now back inside
- # stack_id, so stop
- if b.nums[0] == -1:
- return
- # else, the first one is a "return", so before, we were
- # inside a different frame. Continue running until the next
- # call/return event occurs inside stack_id
+ while True:
+ stack_id = self.pgroup.get_stack_id(is_parent=False)
with self._stack_id_break(stack_id):
- b = self.move_backward(self.pgroup.get_current_time() - 1)
- # and then look at that 'b' again (closes the loop)
+ b = self.move_backward(1)
+ while b is not None:
+ # if we hit a regular breakpoint, stop
+ if any(b.regular_breakpoint_nums()):
+ return
+ # we hit only calls and returns inside stack_id. If the
+ # first one of these is a "call", then we're now back inside
+ # stack_id, so stop
+ if b.nums[0] == -1:
+ break
+ # else, the first one is a "return", so before, we were
+ # inside a different frame. Continue running until the next
+ # call/return event occurs inside stack_id
+ with self._stack_id_break(stack_id):
+ b = self.move_backward(self.pgroup.get_current_time() - 1)
+ # and then look at that 'b' again (closes the loop)
+
+ # we might be at a "<<" position on the same line as before,
+ # which returns a get_hiddenpos_level() value of 1. Continue
+ # until we reach a get_hiddenpos_level() value of 0.
+ if self.pgroup.get_hiddenpos_level() == 0:
+ break
command_bn = command_bnext
def command_finish(self, argument):
diff --git a/rpython/translator/revdb/process.py b/rpython/translator/revdb/process.py
--- a/rpython/translator/revdb/process.py
+++ b/rpython/translator/revdb/process.py
@@ -587,8 +587,14 @@
def edit_breakpoints(self):
return self.all_breakpoints
+ def _stack_id(self, is_parent=0):
+ self.active.send(Message(CMD_STACKID, is_parent))
+ msg = self.active.expect(ANSWER_STACKID, Ellipsis, Ellipsis)
+ self.active.expect_ready()
+ return msg
+
def get_stack_id(self, is_parent):
- self.active.send(Message(CMD_STACKID, is_parent))
- msg = self.active.expect(ANSWER_STACKID, Ellipsis)
- self.active.expect_ready()
- return msg.arg1
+ return self._stack_id(is_parent).arg1
+
+ def get_hiddenpos_level(self):
+ return self._stack_id().arg2
From pypy.commits at gmail.com Fri Aug 5 11:41:58 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Fri, 05 Aug 2016 08:41:58 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: rewritten intro and changed title
of blogpost
Message-ID: <57a4b3c6.e2efc20a.3d189.98bd@mx.google.com>
Author: Richard Plangger
Branch: extradoc
Changeset: r5663:fde0ad51e238
Date: 2016-08-05 17:41 +0200
http://bitbucket.org/pypy/extradoc/changeset/fde0ad51e238/
Log: rewritten intro and changed title of blogpost
diff --git a/blog/draft/new-jit-log.rst b/blog/draft/new-jit-log.rst
--- a/blog/draft/new-jit-log.rst
+++ b/blog/draft/new-jit-log.rst
@@ -1,26 +1,30 @@
-JitViewer moves to vmprof.com
+PyPy Tooling Upgrade: JitViewer and VMProf
=======
-We are happy to announce that VMProf got a major update. The most significant change is the movement of JitViewer (JV)
-to VMProf.
+We are happy to announce a major JitViewer (JV) update.
JV allows you to inspect PyPy's internal compiler representation including the generated machine code of your program.
-A useful tool to understand PyPy, learn many details of our compiler and find potential issues related to our JIT.
-Both VMProf and JV share some common goals. That is the reason why they are now both packaged together.
+A useful tool to spot issues in your program and learn PyPy's compiler details.
+
+VMProf is a statistical cpu profiler imposing very little overhead at runtime.
+
+Both VMProf and JitViewer share a common goal: Present useful information for your Python program.
+The combination of both might reveal more information. That is the reason why they are now both packaged together.
www.vmprof.com also got updated with various bugfixes and changes including an all new interface to JV.
An advertisment: We constantly improve tooling and libraries around the Python/PyPy eco system.
-Here are a three examples you might also want to use in your Python projects:
+Here are four examples you might also want to use in your Python projects:
-* VMProf - A statistical CPU profiler
-* RevDB - A reverse debugger for Python
-* CFFI - Foreign Function Interface that avoids CPyExt
+* VMProf - A statistical CPU profiler (http://vmprof.readthedocs.io/en/latest/)
+* RevDB - A reverse debugger for Python (https://morepypy.blogspot.co.at/2016/07/reverse-debugging-for-python.html)
+* CFFI - Foreign Function Interface that avoids CPyExt (http://cffi.readthedocs.io/en/latest/)
+* JitViewer - Visualization of the log file produced by PyPy (http://vmprof.readthedocs.io/en/latest/)
A "brand new" JitViewer
---------------------
-The old logging format was a hard to maintain plain text logging facility. Frequent changes often broke internal tools, most notably JV. Additionaly the logging output of a long running program took a lot of disk space.
-The old logging format was a hard to maintain plain text logging facility. Frequent changes often broke internal tools. Additionally the logging output of a long running program took a lot of disk space.
-Our new binary format encodes data densly, makes use of some compression (gzip) and tries to remove repetition where possible. On top of that protocol supports versioning and can be extended easily. And *drumroll* you do not need to install JV yourself anymore! The whole system moved to vmprof.com and you can use it any time.
-Our new binary format encodes data densely, makes use of some compression (gzip) and tries to remove repetition where possible. On top of that it supports versioning and can be extended easily. And *drumroll* you do not need to install JV yourself anymore! The whole system moved to vmprof.com and you can use it any time.
Sounds great. But what can you do with it? Here are two examples useful for a PyPy user:
From pypy.commits at gmail.com Fri Aug 5 11:47:53 2016
From: pypy.commits at gmail.com (rlamy)
Date: Fri, 05 Aug 2016 08:47:53 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Allow attribute deletion on
C-defined types
Message-ID: <57a4b529.465d1c0a.1f06f.40ae@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86034:016781b80468
Date: 2016-08-05 16:47 +0100
http://bitbucket.org/pypy/pypy/changeset/016781b80468/
Log: Allow attribute deletion on C-defined types
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -294,6 +294,15 @@
assert type(obj)._some_attribute == 1
del d["_some_attribute"]
+ class A(object):
+ pass
+ obj = A()
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
d = module.get_type_dict(1)
assert type(d) is dict
try:
@@ -371,6 +380,21 @@
api.Py_DecRef(ref)
+ def test_type_dict(self, space, api):
+ w_class = space.appexec([], """():
+ class A(object):
+ pass
+ return A
+ """)
+ ref = make_ref(space, w_class)
+
+ py_type = rffi.cast(PyTypeObjectPtr, ref)
+ w_dict = from_ref(space, py_type.c_tp_dict)
+ w_name = space.newunicode(u'a')
+ space.setitem(w_dict, w_name, space.wrap(1))
+ assert space.int_w(space.getattr(w_class, w_name)) == 1
+ space.delitem(w_dict, w_name)
+
def test_multiple_inheritance(self, space, api):
w_class = space.appexec([], """():
class A(object):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -344,7 +344,7 @@
def deldictvalue(self, space, key):
if self.lazyloaders:
self._cleanup_() # force un-lazification
- if not self.is_heaptype():
+ if not (self.is_heaptype() or self.is_cpytype()):
raise oefmt(space.w_TypeError,
"can't delete attributes on type object '%N'", self)
try:
From pypy.commits at gmail.com Fri Aug 5 11:49:39 2016
From: pypy.commits at gmail.com (fijal)
Date: Fri, 05 Aug 2016 08:49:39 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: fight fight fight
Message-ID: <57a4b593.53b81c0a.e11d7.4049@mx.google.com>
Author: fijal
Branch: improve-vmprof-testing
Changeset: r86035:a7b374677365
Date: 2016-08-05 17:48 +0200
http://bitbucket.org/pypy/pypy/changeset/a7b374677365/
Log: fight fight fight
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -35,6 +35,7 @@
def get_name(code):
return "foo"
+ _get_vmprof().use_weaklist = False
register_code_object_class(CodeObj, get_name)
@vmprof_execute_code("main", get_code_fn,
@@ -49,7 +50,6 @@
llfn()
def main(n):
- vmprof = _get_vmprof()
codes = [CodeObj("main"), CodeObj("not main")]
for code in codes:
register_code(code, get_name)
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -29,6 +29,8 @@
_immutable_fields_ = ['is_enabled?']
+ use_weaklist = True # False for tests
+
def __init__(self):
"NOT_RPYTHON: use _get_vmprof()"
self._code_classes = set()
@@ -56,7 +58,7 @@
self._code_unique_id = uid
if self.is_enabled:
self._write_code_registration(uid, full_name_func(code))
- else:
+ elif self.use_weaklist:
code._vmprof_weak_list.add_handle(code)
def register_code_object_class(self, CodeClass, full_name_func):
@@ -86,7 +88,8 @@
class WeakCodeObjectList(RWeakListMixin):
def __init__(self):
self.initialize()
- CodeClass._vmprof_weak_list = WeakCodeObjectList()
+ if self.use_weaklist:
+ CodeClass._vmprof_weak_list = WeakCodeObjectList()
#
def gather_all_code_objs():
all_code_wrefs = CodeClass._vmprof_weak_list.get_all_handles()
From pypy.commits at gmail.com Fri Aug 5 11:50:53 2016
From: pypy.commits at gmail.com (rlamy)
Date: Fri, 05 Aug 2016 08:50:53 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Cleanup,
delete unused MappingProxyStrategy
Message-ID: <57a4b5dd.898b1c0a.d4b9d.4247@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86036:38670620e06c
Date: 2016-08-05 16:50 +0100
http://bitbucket.org/pypy/pypy/changeset/38670620e06c/
Log: Cleanup, delete unused MappingProxyStrategy
diff --git a/pypy/objspace/std/classdict.py b/pypy/objspace/std/classdict.py
--- a/pypy/objspace/std/classdict.py
+++ b/pypy/objspace/std/classdict.py
@@ -2,10 +2,8 @@
from rpython.rlib.objectmodel import iteritems_with_hash
from pypy.interpreter.error import OperationError, oefmt
-from pypy.interpreter.gateway import interp2app
-from pypy.interpreter.typedef import TypeDef
from pypy.objspace.std.dictmultiobject import (
- DictStrategy, W_DictObject, create_iterator_classes)
+ DictStrategy, create_iterator_classes)
from pypy.objspace.std.typeobject import unwrap_cell
@@ -80,7 +78,8 @@
for key in w_type.dict_w.iterkeys()])
def values(self, w_dict):
- return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()]
+ return [unwrap_cell(self.space, w_value) for w_value in
+ self.unerase(w_dict.dstorage).dict_w.itervalues()]
def items(self, w_dict):
space = self.space
@@ -100,12 +99,16 @@
def getiterkeys(self, w_dict):
return self.unerase(w_dict.dstorage).dict_w.iterkeys()
+
def getitervalues(self, w_dict):
return self.unerase(w_dict.dstorage).dict_w.itervalues()
+
def getiteritems_with_hash(self, w_dict):
return iteritems_with_hash(self.unerase(w_dict.dstorage).dict_w)
+
def wrapkey(space, key):
return _wrapkey(space, key)
+
def wrapvalue(space, value):
return unwrap_cell(space, value)
@@ -114,70 +117,3 @@
return space.wrap(key.decode('utf-8'))
create_iterator_classes(ClassDictStrategy)
-
-
-class MappingProxyStrategy(DictStrategy):
- """Wraps an applevel mapping in a read-only dictionary."""
- erase, unerase = rerased.new_erasing_pair("mappingproxy")
- erase = staticmethod(erase)
- unerase = staticmethod(unerase)
-
- def getitem(self, w_dict, w_key):
- try:
- return self.space.getitem(self.unerase(w_dict.dstorage), w_key)
- except OperationError as e:
- if not e.match(self.space, self.space.w_KeyError):
- raise
- return None
-
- def setitem(self, w_dict, w_key, w_value):
- raise oefmt(self.space.w_TypeError,
- "'%T' object does not support item assignment", w_dict)
-
- def delitem(self, w_dict, w_key):
- raise oefmt(self.space.w_TypeError,
- "'%T' object does not support item deletion", w_dict)
-
- def length(self, w_dict):
- return self.space.len_w(self.unerase(w_dict.dstorage))
-
- def getiterkeys(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "keys"))
-
- def getitervalues(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "values"))
-
- def getiteritems_with_hash(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "items"))
-
- @staticmethod
- def override_next_key(iterkeys):
- w_keys = iterkeys.iterator
- return iterkeys.space.next(w_keys)
-
- @staticmethod
- def override_next_value(itervalues):
- w_values = itervalues.iterator
- return itervalues.space.next(w_values)
-
- @staticmethod
- def override_next_item(iteritems):
- w_items = iteritems.iterator
- w_item = iteritems.space.next(w_items)
- w_key, w_value = iteritems.space.unpackiterable(w_item, 2)
- return w_key, w_value
-
- def clear(self, w_dict):
- raise oefmt(self.space.w_AttributeError, "clear")
-
- def copy(self, w_dict):
- return self.space.call_method(self.unerase(w_dict.dstorage), "copy")
-
-create_iterator_classes(
- MappingProxyStrategy,
- override_next_key=MappingProxyStrategy.override_next_key,
- override_next_value=MappingProxyStrategy.override_next_value,
- override_next_item=MappingProxyStrategy.override_next_item)
From pypy.commits at gmail.com Fri Aug 5 11:55:16 2016
From: pypy.commits at gmail.com (fijal)
Date: Fri, 05 Aug 2016 08:55:16 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: fight fight until we win
Message-ID: <57a4b6e4.915c1c0a.c121.3f19@mx.google.com>
Author: fijal
Branch: improve-vmprof-testing
Changeset: r86037:0bcafba73720
Date: 2016-08-05 17:54 +0200
http://bitbucket.org/pypy/pypy/changeset/0bcafba73720/
Log: fight fight until we win
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -25,6 +25,10 @@
def __str__(self):
return self.msg
+class FakeWeakCodeObjectList(object):
+ def add_handle(self, handle):
+ pass
+
class VMProf(object):
_immutable_fields_ = ['is_enabled?']
@@ -90,6 +94,8 @@
self.initialize()
if self.use_weaklist:
CodeClass._vmprof_weak_list = WeakCodeObjectList()
+ else:
+ CodeClass._vmprof_weak_list = FakeWeakCodeObjectList()
#
def gather_all_code_objs():
all_code_wrefs = CodeClass._vmprof_weak_list.get_all_handles()
From pypy.commits at gmail.com Fri Aug 5 12:06:08 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 05 Aug 2016 09:06:08 -0700 (PDT)
Subject: [pypy-commit] pypy.org extradoc: update the values
Message-ID: <57a4b970.a717c20a.155f5.9596@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r773:51149b559c70
Date: 2016-08-05 18:08 +0200
http://bitbucket.org/pypy/pypy.org/changeset/51149b559c70/
Log: update the values
diff --git a/don1.html b/don1.html
--- a/don1.html
+++ b/don1.html
@@ -15,7 +15,7 @@
- $64753 of $105000 (61.7%)
+ $64781 of $105000 (61.7%)
@@ -23,7 +23,7 @@
This donation goes towards supporting Python 3 in PyPy.
Current status:
- we have $5240 left
+ we have $5266 left
in the account. Read proposal
From pypy.commits at gmail.com Fri Aug 5 13:11:37 2016
From: pypy.commits at gmail.com (fijal)
Date: Fri, 05 Aug 2016 10:11:37 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: make sure we call llfn()
only when in the inside code
Message-ID: <57a4c8c9.c2a5c20a.adc42.9bd8@mx.google.com>
Author: fijal
Branch: improve-vmprof-testing
Changeset: r86038:f89c232d0bfe
Date: 2016-08-05 19:10 +0200
http://bitbucket.org/pypy/pypy/changeset/f89c232d0bfe/
Log: make sure we call llfn() only when in the inside code
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -46,8 +46,9 @@
driver.jit_merge_point(code=code)
if code.name == "main":
f(codes, codes[1], 5)
+ else:
+ llfn()
i += 1
- llfn()
def main(n):
codes = [CodeObj("main"), CodeObj("not main")]
From pypy.commits at gmail.com Fri Aug 5 13:24:57 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 05 Aug 2016 10:24:57 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Tweaks
Message-ID: <57a4cbe9.28eac20a.8f95a.a15a@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86039:9c9a38357bca
Date: 2016-08-05 19:26 +0200
http://bitbucket.org/pypy/pypy/changeset/9c9a38357bca/
Log: Tweaks
diff --git a/pypy/interpreter/reverse_debugging.py b/pypy/interpreter/reverse_debugging.py
--- a/pypy/interpreter/reverse_debugging.py
+++ b/pypy/interpreter/reverse_debugging.py
@@ -253,13 +253,13 @@
# ____________________________________________________________
-def fetch_cur_frame():
+def fetch_cur_frame(silent=False):
ec = dbstate.space.threadlocals.get_ec()
if ec is None:
frame = None
else:
frame = ec.topframeref()
- if frame is None:
+ if frame is None and not silent:
revdb.send_output("No stack.\n")
return frame
@@ -388,8 +388,9 @@
indent))
revdb.send_linecache(frame.getcode().co_filename, lineno)
-def display_function_part(frame, max_lines_before, max_lines_after,
- prompt="> "):
+def display_function_part(frame, max_lines_before, max_lines_after):
+ if frame is None:
+ return
code = frame.getcode()
if code.co_filename.startswith(''):
return
@@ -407,33 +408,33 @@
#
for i in range(first_lineno, final_lineno + 1):
if i == current_lineno:
+ if revdb.current_place() == -2: # <= this is the arg to stop_point()
+ prompt = "<< " # return
+ elif revdb.current_place() == -1:
+ prompt = "!! " # exceptional return
+ else:
+ prompt = " > " # plain line
revdb.send_output(prompt)
else:
- revdb.send_output(" ")
+ revdb.send_output(" ")
revdb.send_linecache(code.co_filename, i, strip=False)
#
if ellipsis_after:
revdb.send_output("...\n")
def command_backtrace(cmd, extra):
- frame = fetch_cur_frame()
- if frame is None:
- return
+ frame = fetch_cur_frame(silent=True)
if cmd.c_arg1 == 0:
- revdb.send_output("%s:\n" % (
- file_and_lineno(frame, frame.get_last_lineno()),))
- if revdb.current_place() == -2: # <= this is the arg to stop_point()
- prompt = "<<" # return
- elif revdb.current_place() == -1:
- prompt = "!!" # exceptional return
- else:
- prompt = "> " # plain line
- display_function_part(frame, max_lines_before=8, max_lines_after=5,
- prompt=prompt)
+ if frame is not None:
+ revdb.send_output("%s:\n" % (
+ file_and_lineno(frame, frame.get_last_lineno()),))
+ display_function_part(frame, max_lines_before=8, max_lines_after=5)
elif cmd.c_arg1 == 2:
display_function_part(frame, max_lines_before=1000,max_lines_after=1000)
else:
revdb.send_output("Current call stack (most recent call last):\n")
+ if frame is None:
+ revdb.send_output(" (empty)\n")
frames = []
while frame is not None:
frames.append(frame)
@@ -505,7 +506,7 @@
def command_stackid(cmd, extra):
- frame = fetch_cur_frame()
+ frame = fetch_cur_frame(silent=True)
if frame is not None and cmd.c_arg1 != 0: # parent_flag
frame = dbstate.space.getexecutioncontext().getnextframe_nohidden(frame)
if frame is None:
From pypy.commits at gmail.com Fri Aug 5 13:25:10 2016
From: pypy.commits at gmail.com (fijal)
Date: Fri, 05 Aug 2016 10:25:10 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: make the first assert
about metainterp
Message-ID: <57a4cbf6.87941c0a.9a01f.697a@mx.google.com>
Author: fijal
Branch: improve-vmprof-testing
Changeset: r86040:99215a4747c0
Date: 2016-08-05 19:24 +0200
http://bitbucket.org/pypy/pypy/changeset/99215a4747c0/
Log: make the first assert about metainterp
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -45,7 +45,7 @@
while i < n:
driver.jit_merge_point(code=code)
if code.name == "main":
- f(codes, codes[1], 5)
+ f(codes, codes[1], 1)
else:
llfn()
i += 1
@@ -65,7 +65,7 @@
null = lltype.nullptr(cintf.VMPROFSTACK)
cintf.vmprof_tl_stack.setraw(null)
self.meta_interp(main, [10], policy=JitPolicy(hooks))
- print visited
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
#v = set(visited)
#assert 0 in v
#v.remove(0)
From pypy.commits at gmail.com Fri Aug 5 14:26:02 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 05 Aug 2016 11:26:02 -0700 (PDT)
Subject: [pypy-commit] pypy default: Another hack to avoid constant-folding
"2 ** 12345678912"
Message-ID: <57a4da3a.c2f3c20a.a80d5.bf8e@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86041:5df38f3fbcc0
Date: 2016-08-05 20:27 +0200
http://bitbucket.org/pypy/pypy/changeset/5df38f3fbcc0/
Log: Another hack to avoid constant-folding "2 ** 12345678912"
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -108,8 +108,15 @@
return getattr(space, name)(operand)
return do_fold
-def _fold_pow(space, left, right):
- return space.pow(left, right, space.w_None)
+def _fold_pow(space, w_left, w_right):
+ # don't constant-fold if "w_left" and "w_right" are integers and
+ # the estimated bit length of the power is unreasonably large
+ space.appexec([w_left, w_right], """(left, right):
+ if isinstance(left, (int, long)) and isinstance(right, (int, long)):
+ if left.bit_length() * right > 5000:
+ raise OverflowError
+ """)
+ return space.pow(w_left, w_right, space.w_None)
def _fold_not(space, operand):
return space.wrap(not space.is_true(operand))
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1156,3 +1156,22 @@
counts = self.count_instructions(source)
assert ops.BUILD_SET not in counts
assert ops.LOAD_CONST in counts
+
+ def test_dont_fold_huge_powers(self):
+ for source in (
+ "2 ** 3000", # not constant-folded: too big
+ "(-2) ** 3000",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER in counts
+
+ for source in (
+ "2 ** 2000", # constant-folded
+ "2 ** -3000",
+ "1.001 ** 3000",
+ "1 ** 3000.0",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER not in counts
From pypy.commits at gmail.com Fri Aug 5 14:59:21 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Fri, 05 Aug 2016 11:59:21 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: (plan_rich) Fix cast in
memoryobject for rpython
Message-ID: <57a4e209.0472c20a.45b74.d088@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86042:da8ab61904e8
Date: 2016-08-05 20:58 +0200
http://bitbucket.org/pypy/pypy/changeset/da8ab61904e8/
Log: (plan_rich) Fix cast in memoryobject for rpython
diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py
--- a/pypy/module/thread/os_lock.py
+++ b/pypy/module/thread/os_lock.py
@@ -147,7 +147,8 @@
def set_sentinel(space):
"""Set a sentinel lock that will be released when the current thread
state is finalized (after it is untied from the interpreter)."""
- return space.wrap(Lock(space))
+ lock = allocate_lock(space)
+ return lock
class W_RLock(W_Root):
def __init__(self, space):
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
--- a/pypy/objspace/std/memoryobject.py
+++ b/pypy/objspace/std/memoryobject.py
@@ -226,12 +226,12 @@
size = rffi.sizeof(rffi.VOIDP)
return size
- def descr_cast(self, space, w_args, w_kwds):
+ def descr_cast(self, space, w_format, w_shape=None):
# XXX fixme. does not do anything near cpython (see memoryobjet.c memory_cast)
- #self._check_released(space)
- #newitemsize = self.get_native_fmtchar(w_args._val(w_args))
- return W_MemoryView(self.buf, self.format, self.itemsize)
- return mv
+ self._check_released(space)
+ fmt = space.str_w(w_format)
+ newitemsize = self.get_native_fmtchar(fmt)
+ return W_MemoryView(self.buf, fmt, newitemsize)
W_MemoryView.typedef = TypeDef(
From pypy.commits at gmail.com Fri Aug 5 15:42:37 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Fri, 05 Aug 2016 12:42:37 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Add tests for BUILD_MAP_UNPACK
Message-ID: <57a4ec2d.6211c20a.475ef.cdfe@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86043:0f39cfd33706
Date: 2016-08-05 21:42 +0200
http://bitbucket.org/pypy/pypy/changeset/0f39cfd33706/
Log: Add tests for BUILD_MAP_UNPACK
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -256,7 +256,18 @@
return a, b, c, d
"""
assert self.codetest(code, "f", [1, 2], {"d" : 4, "c" : 3}) == (1, 2, 3, 4)
-
+
+ def test_build_map_unpack(self):
+ code = """
+ def f():
+ return {'x': 1, **{'y': 2}}
+ def g():
+ return {**()}
+ """
+ assert self.codetest(code, "f", []) == {'x': 1, 'y': 2}
+ res = self.codetest(code, 'g', [])
+ assert "TypeError:" in res
+ assert "'tuple' object is not a mapping" in res
class AppTestInterpreter:
From pypy.commits at gmail.com Fri Aug 5 16:07:18 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Fri, 05 Aug 2016 13:07:18 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Add tests for
build_set/tuple/list_unpack
Message-ID: <57a4f1f6.d42f1c0a.7bc41.8fa1@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86044:60ce08921aef
Date: 2016-08-05 22:06 +0200
http://bitbucket.org/pypy/pypy/changeset/60ce08921aef/
Log: Add tests for build_set/tuple/list_unpack
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -257,6 +257,24 @@
"""
assert self.codetest(code, "f", [1, 2], {"d" : 4, "c" : 3}) == (1, 2, 3, 4)
+ def test_build_set_unpack(self):
+ code = """ def f():
+ return {*range(4), 4, *(5, 6, 7)}
+ """
+ assert self.codetest(code, "f", []) == {0, 1, 2, 3, 4, 5, 6, 7}
+
+ def test_build_tuple_unpack(self):
+ code = """ def f():
+ return (*range(4), 4)
+ """
+ assert self.codetest(code, "f", []) == (0, 1, 2, 3, 4)
+
+ def test_build_list_unpack(self):
+ code = """ def f():
+ return [*range(4), 4]
+ """
+ assert self.codetest(code, "f", []) == [0, 1, 2, 3, 4]
+
def test_build_map_unpack(self):
code = """
def f():
From pypy.commits at gmail.com Fri Aug 5 16:13:58 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Fri, 05 Aug 2016 13:13:58 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async-translate: translation issues batch 2
Message-ID: <57a4f386.041f1c0a.342ac.959d@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async-translate
Changeset: r86045:5a7b8af95db6
Date: 2016-08-05 22:13 +0200
http://bitbucket.org/pypy/pypy/changeset/5a7b8af95db6/
Log: translation issues batch 2
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -581,12 +581,13 @@
# position, then raise a GeneratorExit. Otherwise, there is
# no point.
# If coroutine was never awaited on issue a RuntimeWarning.
- if self.pycode is not None:
- if self.frame is not None:
- if self.frame.fget_f_lasti(self.frame).int_w(self.space) == -1:
- raise oefmt(space.w_RuntimeWarning,
- "coroutine '%s' was never awaited",
- self.pycode.co_name)
+ if self.pycode is not None and \
+ self.frame is not None and \
+ self.frame.last_instr == -1:
+ # XXX PyErr_Occured in condition?
+ raise oefmt(self.space.w_RuntimeWarning,
+ "coroutine '%s' was never awaited",
+ self.pycode.co_name)
if self.frame is not None:
block = self.frame.lastblock
while block is not None:
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -6,7 +6,7 @@
from rpython.rlib import jit, rstackovf, rstring
from rpython.rlib.debug import check_nonneg
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, always_inline
from rpython.rlib.rarithmetic import r_uint, intmask
from rpython.tool.sourcetools import func_with_new_name
@@ -18,6 +18,7 @@
from pypy.interpreter.nestedscope import Cell
from pypy.interpreter.pycode import PyCode, BytecodeCorruption
from pypy.tool.stdlib_opcode import bytecode_spec
+from pypy.objspace.std.dictmultiobject import W_DictMultiObject
CANNOT_CATCH_MSG = ("catching classes that don't inherit from BaseException "
"is not allowed in 3.x")
@@ -45,6 +46,27 @@
return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname)
+def get_func_desc(func):
+ if self.space.type(func) is function.Function:
+ return "()"
+ elif self.space.type(func) is function.Method:
+ return "()"
+ else:
+ return " object";
+
+ at always_inline
+def list_unpack_helper(frame, itemcount):
+ space = frame.space
+ w_sum = space.newlist([], sizehint=itemcount)
+ for i in range(itemcount, 0, -1):
+ w_item = frame.peekvalue(i-1)
+ #items = frame.space.fixedview(w_item)
+ w_sum.extend(w_item)
+ while itemcount != 0:
+ frame.popvalue()
+ itemcount -= 1
+ return w_sum
+
opcodedesc = bytecode_spec.opcodedesc
HAVE_ARGUMENT = bytecode_spec.HAVE_ARGUMENT
@@ -1351,74 +1373,73 @@
self.space.call_method(w_set, 'add', w_item)
self.pushvalue(w_set)
- def unpack_helper(self, itemcount, next_instr):
- w_sum = []
+ def BUILD_SET_UNPACK(self, itemcount, next_instr):
+ space = self.space
+ w_sum = space.newset()
for i in range(itemcount, 0, -1):
w_item = self.peekvalue(i-1)
- items = self.space.fixedview(w_item)
- w_sum.extend(items)
+ # cannot use w_sum.update, w_item might not be a set
+ iterator = w_item.itervalues()
+ while True:
+ w_value = iterator.next_value()
+ if w_value is None:
+ break
+ w_sum.add(w_value)
while itemcount != 0:
self.popvalue()
itemcount -= 1
- return w_sum
-
- def BUILD_SET_UNPACK(self, itemcount, next_instr):
- w_sum = self.unpack_helper(itemcount, next_instr)
- self.pushvalue(self.space.newset(w_sum))
+ self.pushvalue(w_sum)
def BUILD_TUPLE_UNPACK(self, itemcount, next_instr):
- w_sum = self.unpack_helper(itemcount, next_instr)
- self.pushvalue(self.space.newtuple(w_sum))
-
+ space = self.space
+ w_sum_list = list_unpack_helper(self, itemcount)
+ self.pushvalue(space.newtuple(w_sum_list))
+
def BUILD_LIST_UNPACK(self, itemcount, next_instr):
- w_sum = self.unpack_helper(itemcount, next_instr)
- self.pushvalue(self.space.newlist(w_sum))
-
- def getFuncDesc(func):
- if self.space.type(aaa).name.decode('utf-8') == 'method':
- return "()"
- elif self.space.type(aaa).name.decode('utf-8') == 'function':
- return "()"
- else:
- return " object";
-
+ w_sum = list_unpack_helper(self, itemcount)
+ self.pushvalue(w_sum)
+
def BUILD_MAP_UNPACK_WITH_CALL(self, itemcount, next_instr):
+ space = self.space
num_maps = itemcount & 0xff
function_location = (itemcount>>8) & 0xff
- w_dict = self.space.newdict()
- dict_class = w_dict.__class__
+ w_dict = space.newdict()
for i in range(num_maps, 0, -1):
w_item = self.peekvalue(i-1)
- if not issubclass(w_item.__class__, dict_class):
- raise oefmt(self.space.w_TypeError,
+ if space.lookup(w_item, '__getitem__') is None:
+ raise oefmt(space.w_TypeError,
"'%T' object is not a mapping", w_item)
- num_items = w_item.length()
- keys = w_item.w_keys()
- for j in range(num_items):
- if self.space.type(keys.getitem(j)).name.decode('utf-8') == 'method':
+ iterator = w_item.iterkeys()
+ while True:
+ w_key = iterator.next_key()
+ if w_key is None:
+ break
+ if not isinstance(w_key, space.UnicodeObjectCls):
err_fun = self.peekvalue(num_maps + function_location-1)
- raise oefmt(self.space.w_TypeError,
- "%N%s keywords must be strings", err_fun, getFuncDesc(err_fun))
- if self.space.is_true(self.space.contains(w_dict,keys.getitem(j))):
+ raise oefmt(space.w_TypeError,
+ "%N%s keywords must be strings", err_fun,
+ get_func_desc(err_fun))
+ if space.is_true(space.contains(w_dict,w_key)):
err_fun = self.peekvalue(num_maps + function_location-1)
- err_arg = self.space.unicode_w(keys.getitem(j))
- raise oefmt(self.space.w_TypeError,
- "%N%s got multiple values for keyword argument %s", err_fun, getFuncDesc(err_fun), err_arg)
- self.space.call_method(w_dict, 'update', w_item)
+ err_arg = w_key
+ raise oefmt(space.w_TypeError,
+ "%N%s got multiple values for keyword argument %s",
+ err_fun, get_func_desc(err_fun), err_arg)
+ space.call_method(w_dict, 'update', w_item)
while num_maps != 0:
self.popvalue()
num_maps -= 1
self.pushvalue(w_dict)
-
+
def BUILD_MAP_UNPACK(self, itemcount, next_instr):
- w_dict = self.space.newdict()
- dict_class = w_dict.__class__
+ space = self.space
+ w_dict = space.newdict()
for i in range(itemcount, 0, -1):
w_item = self.peekvalue(i-1)
- if not issubclass(w_item.__class__, dict_class):
+ if space.lookup(w_item, '__getitem__') is None:
raise oefmt(self.space.w_TypeError,
"'%T' object is not a mapping", w_item)
- self.space.call_method(w_dict, 'update', w_item)
+ space.call_method(w_dict, 'update', w_item)
while itemcount != 0:
self.popvalue()
itemcount -= 1
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
--- a/pypy/objspace/std/memoryobject.py
+++ b/pypy/objspace/std/memoryobject.py
@@ -198,7 +198,6 @@
def get_native_fmtchar(self, fmt):
from rpython.rtyper.lltypesystem import rffi
- from sys import getsizeof
size = -1
if fmt[0] == '@':
f = fmt[1]
@@ -215,7 +214,7 @@
elif f == 'q' or f == 'Q':
size = rffi.sizeof(rffi.LONGLONG)
elif f == 'n' or f == 'N':
- size = getsizeof(rffi.r_ssize_t)
+ size = rffi.sizeof(rffi.SIZE_T)
elif f == 'f':
size = rffi.sizeof(rffi.FLOAT)
elif f == 'd':
@@ -225,13 +224,13 @@
elif f == 'P':
size = rffi.sizeof(rffi.VOIDP)
return size
-
- def descr_cast(self, space, w_args, w_kwds):
+
+ def descr_cast(self, space, w_format, w_shape=None):
# XXX fixme. does not do anything near cpython (see memoryobjet.c memory_cast)
- #self._check_released(space)
- #newitemsize = self.get_native_fmtchar(w_args._val(w_args))
- return W_MemoryView(self.buf, self.format, self.itemsize)
- return mv
+ self._check_released(space)
+ fmt = space.str_w(w_format)
+ newitemsize = self.get_native_fmtchar(fmt)
+ return W_MemoryView(self.buf, fmt, newitemsize)
W_MemoryView.typedef = TypeDef(
From pypy.commits at gmail.com Sat Aug 6 03:22:50 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 00:22:50 -0700 (PDT)
Subject: [pypy-commit] pypy.org extradoc: update the values
Message-ID: <57a5904a.8f8e1c0a.694f0.22d2@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r774:b523df27f1bf
Date: 2016-08-06 09:25 +0200
http://bitbucket.org/pypy/pypy.org/changeset/b523df27f1bf/
Log: update the values
diff --git a/don1.html b/don1.html
--- a/don1.html
+++ b/don1.html
@@ -15,7 +15,7 @@
- $64781 of $105000 (61.7%)
+ $64800 of $105000 (61.7%)
@@ -23,7 +23,7 @@
This donation goes towards supporting Python 3 in PyPy.
Current status:
- we have $5266 left
+ we have $5283 left
in the account. Read proposal
diff --git a/don4.html b/don4.html
--- a/don4.html
+++ b/don4.html
@@ -9,7 +9,7 @@
@@ -17,7 +17,7 @@
2nd call:
- $30794 of $80000 (38.5%)
+ $30845 of $80000 (38.6%)
@@ -25,7 +25,7 @@
This donation goes towards supporting the Transactional Memory in PyPy.
Current status:
- we have $23459 left
+ we have $23502 left
in the account. Read proposal (2nd call)
From pypy.commits at gmail.com Sat Aug 6 07:00:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 04:00:42 -0700 (PDT)
Subject: [pypy-commit] pypy.org extradoc: Update the "Installing NumPy"
section. Push numpy-via-cpyext forward.
Message-ID: <57a5c35a.c41f1c0a.c04bb.6bd6@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r775:2f768f8073b0
Date: 2016-08-06 13:02 +0200
http://bitbucket.org/pypy/pypy.org/changeset/2f768f8073b0/
Log: Update the "Installing NumPy" section. Push numpy-via-cpyext
forward.
diff --git a/download.html b/download.html
--- a/download.html
+++ b/download.html
@@ -206,14 +206,38 @@
Installing more modules
The recommended way is to install pip, which is the standard package
manager of Python. It works like it does on CPython as explained in the
-installation documentation
+installation documentation.
Installing NumPy
-
NumPy is an exception to the rule that most packages work without
-changes. The “numpy” module needs to be installed from our own
-repository rather than from the official source.
-
If you have pip:
+
There are two different versions of NumPy for PyPy.
+
+
1. NumPy via cpyext
+
The generally recommended way is to install the original NumPy via the
+CPython C API compatibility layer, cpyext. Modern versions of PyPy
+support enough of the C API to make this a reasonable choice in many
+cases. Performance-wise, the speed is mostly the same as CPython's
+NumPy (it is the same code); the exception is that interactions between
+the Python side and NumPy objects are mediated through the slower cpyext
+layer (which hurts a few benchmarks that do a lot of element-by-element
+array accesses, for example).
+
Installation works as usual. For example, without using a virtualenv:
+
+$ ./pypy-xxx/bin/pypy -m ensurepip
+$ ./pypy-xxx/bin/pip install numpy
+
+
(See the general installation documentation for more.)
+
+
+
2. NumPyPy
+
The “numpy” module can be installed from our own repository rather
+than from the official source. This version uses internally our
+built-in _numpypy module. This module is slightly incomplete.
+Also, its performance is hard to predict exactly. For regular NumPy
+source code that handles large arrays, it is likely to be slower than
+the native NumPy with cpyext. It is faster on the kind of code that
+contains many Python loops doing things on an element-by-element basis.
+
Installation (see the installation documentation for installing pip):
pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
@@ -227,10 +251,11 @@
sudo pypy -c 'import numpy'
-
Note that NumPy support is still a work-in-progress, many things do not
-work and those that do may not be any faster than NumPy on CPython.
+
Note again that this version is still a work-in-progress: many things do
+not work and those that do may not be any faster than NumPy on CPython.
For further instructions see the pypy/numpy repository.
+
Building from source
(see more build instructions)
diff --git a/source/download.txt b/source/download.txt
--- a/source/download.txt
+++ b/source/download.txt
@@ -216,7 +216,7 @@
The recommended way is to install ``pip``, which is the standard package
manager of Python. It works like it does on CPython as explained in the
-`installation documentation`_
+`installation documentation`_.
.. _installation documentation: http://doc.pypy.org/en/latest/install.html
@@ -224,13 +224,43 @@
Installing NumPy
-------------------------------
-NumPy is an exception to the rule that most packages work without
-changes. The "numpy" module needs to be installed from `our own
-repository`__ rather than from the official source.
+**There are two different versions of NumPy for PyPy.**
+
+
+1. NumPy via cpyext
++++++++++++++++++++
+
+The generally recommended way is to install the original NumPy via the
+CPython C API compatibility layer, cpyext. Modern versions of PyPy
+support enough of the C API to make this a reasonable choice in many
+cases. Performance-wise, the speed is mostly the same as CPython's
+NumPy (it is the same code); the exception is that interactions between
+the Python side and NumPy objects are mediated through the slower cpyext
+layer (which hurts a few benchmarks that do a lot of element-by-element
+array accesses, for example).
+
+Installation works as usual. For example, without using a virtualenv::
+
+ $ ./pypy-xxx/bin/pypy -m ensurepip
+ $ ./pypy-xxx/bin/pip install numpy
+
+(See the general `installation documentation`_ for more.)
+
+
+2. NumPyPy
+++++++++++
+
+The "numpy" module can be installed from `our own repository`__ rather
+than from the official source. This version uses internally our
+built-in ``_numpypy`` module. This module is slightly incomplete.
+Also, its performance is hard to predict exactly. For regular NumPy
+source code that handles large arrays, it is likely to be slower than
+the native NumPy with cpyext. It is faster on the kind of code that
+contains many Python loops doing things on an element-by-element basis.
.. __: https://bitbucket.org/pypy/numpy
-If you have pip::
+Installation (see the `installation documentation`_ for installing ``pip``)::
pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
@@ -244,8 +274,8 @@
sudo pypy -c 'import numpy'
-Note that NumPy support is still a work-in-progress, many things do not
-work and those that do may not be any faster than NumPy on CPython.
+Note again that this version is still a work-in-progress: many things do
+not work and those that do may not be any faster than NumPy on CPython.
For further instructions see `the pypy/numpy repository`__.
.. __: https://bitbucket.org/pypy/numpy
From pypy.commits at gmail.com Sat Aug 6 10:54:33 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 07:54:33 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: rvmprof: record the
correct Python frames during pyjitpl
Message-ID: <57a5fa29.82ddc20a.5e0f1.056f@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86046:11f391e1f1d6
Date: 2016-08-06 16:56 +0200
http://bitbucket.org/pypy/pypy/changeset/11f391e1f1d6/
Log: rvmprof: record the correct Python frames during pyjitpl
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -452,6 +452,8 @@
prepare = self._handle_math_sqrt_call
elif oopspec_name.startswith('rgc.'):
prepare = self._handle_rgc_call
+ elif oopspec_name.startswith('rvmprof.'):
+ prepare = self._handle_rvmprof_call
elif oopspec_name.endswith('dict.lookup'):
# also ordereddict.lookup
prepare = self._handle_dict_lookup_call
@@ -2079,6 +2081,22 @@
else:
raise NotImplementedError(oopspec_name)
+ def _handle_rvmprof_call(self, op, oopspec_name, args):
+ if oopspec_name == 'rvmprof.enter_code':
+ leaving = 0
+ elif oopspec_name == 'rvmprof.leave_code':
+ leaving = 1
+ else:
+ raise NotImplementedError(oopspec_name)
+ c_leaving = Constant(leaving, lltype.Signed)
+ v_uniqueid = op.args[-1]
+ ops = [SpaceOperation('rvmprof_code', [c_leaving, v_uniqueid], None)]
+ if op.result.concretetype is not lltype.Void:
+ c_null = Constant(lltype.nullptr(op.result.concretetype.TO),
+ op.result.concretetype)
+ ops.append(c_null)
+ return ops
+
def rewrite_op_ll_read_timestamp(self, op):
op1 = self.prepare_builtin_call(op, "ll_read_timestamp", [])
return self.handle_residual_call(op1,
diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py
--- a/rpython/jit/codewriter/test/test_flatten.py
+++ b/rpython/jit/codewriter/test/test_flatten.py
@@ -140,7 +140,6 @@
def encoding_test(self, func, args, expected,
transform=False, liveness=False, cc=None, jd=None):
-
graphs = self.make_graphs(func, args)
#graphs[0].show()
if transform:
@@ -1112,6 +1111,20 @@
assert str(e.value).startswith("A virtualizable array is passed aroun")
assert "" in str(e.value)
+ def test_rvmprof_code(self):
+ from rpython.rlib.rvmprof import cintf
+ class MyFakeCallControl(FakeCallControl):
+ def guess_call_kind(self, op):
+ return 'builtin'
+ def f(x):
+ s = cintf.enter_code(x)
+ cintf.leave_code(s, x)
+ self.encoding_test(f, [42], """
+ rvmprof_code $0, %i0
+ rvmprof_code $1, %i0
+ void_return
+ """, transform=True, cc=MyFakeCallControl())
+
def check_force_cast(FROM, TO, operations, value):
"""Check that the test is correctly written..."""
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -1501,6 +1501,10 @@
def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length):
cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length)
+ @arguments("i", "i")
+ def bhimpl_rvmprof_code(leaving, unique_id):
+ pass #import pdb;pdb.set_trace()
+
# ----------
# helpers to resume running in blackhole mode when a guard failed
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -1453,6 +1453,15 @@
metainterp.history.record(rop.VIRTUAL_REF_FINISH,
[vrefbox, nullbox], None)
+ @arguments("int", "box")
+ def opimpl_rvmprof_code(self, leaving, box_unique_id):
+ from rpython.rlib.rvmprof import cintf
+ unique_id = box_unique_id.getint()
+ if not leaving:
+ cintf.enter_code(unique_id)
+ else:
+ cintf.leave_code_check(unique_id)
+
# ------------------------------
def setup_call(self, argboxes):
diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -6,7 +6,7 @@
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform as platform
-from rpython.rlib import rthread
+from rpython.rlib import rthread, jit
class VMProfPlatformUnsupported(Exception):
pass
@@ -86,6 +86,22 @@
ExternalCompilationInfo(includes=['vmprof_stack.h'],
include_dirs = [SRC]))
+# JIT notes:
+#
+# - When running JIT-generated assembler code, we have different custom
+# code to build the VMPROFSTACK, so the functions below are not used.
+#
+# - The jitcode for decorated_function() in rvmprof.py still contains
+# calls to these two oopspec functions, which are represented with
+# the 'rvmprof_code' jitcode opcode.
+#
+# - When meta-interpreting, the 'rvmprof_code' opcode causes pyjitpl
+# to call enter_code()/leave_code_check(), but otherwise
+# 'rvmprof_code' is ignored, i.e. doesn't produce any resop.
+#
+# - Blackhole: ...
+
+ at jit.oopspec("rvmprof.enter_code(unique_id)")
def enter_code(unique_id):
do_use_eci()
s = lltype.malloc(VMPROFSTACK, flavor='raw')
@@ -95,6 +111,12 @@
vmprof_tl_stack.setraw(s)
return s
-def leave_code(s):
+ at jit.oopspec("rvmprof.leave_code(s, unique_id)")
+def leave_code(s, unique_id):
vmprof_tl_stack.setraw(s.c_next)
lltype.free(s, flavor='raw')
+
+def leave_code_check(unique_id):
+ s = vmprof_tl_stack.getraw()
+ assert s.c_value == unique_id
+ leave_code(s, unique_id)
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -1,6 +1,6 @@
import sys, os
from rpython.rlib.objectmodel import specialize, we_are_translated
-from rpython.rlib import jit, rposix
+from rpython.rlib import rposix
from rpython.rlib.rvmprof import cintf
from rpython.rtyper.annlowlevel import cast_instance_to_gcref
from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
@@ -162,12 +162,19 @@
"""
if _hack_update_stack_untranslated:
from rpython.rtyper.annlowlevel import llhelper
- enter_code = llhelper(lltype.Ptr(
+ from rpython.rlib import jit
+ enter_code_untr = llhelper(lltype.Ptr(
lltype.FuncType([lltype.Signed], cintf.PVMPROFSTACK)),
cintf.enter_code)
- leave_code = llhelper(lltype.Ptr(
- lltype.FuncType([cintf.PVMPROFSTACK], lltype.Void)),
+ leave_code_untr = llhelper(lltype.Ptr(
+ lltype.FuncType([cintf.PVMPROFSTACK, lltype.Signed], lltype.Void)),
cintf.leave_code)
+ @jit.oopspec("rvmprof.enter_code(unique_id)")
+ def enter_code(unique_id):
+ return enter_code_untr(unique_id)
+ @jit.oopspec("rvmprof.leave_code(s)")
+ def leave_code(s, unique_id):
+ leave_code_untr(s, unique_id)
else:
enter_code = cintf.enter_code
leave_code = cintf.leave_code
@@ -179,17 +186,12 @@
return func
def decorated_function(*args):
- # If we are being JITted, we want to skip the trampoline, else the
- # JIT cannot see through it.
- if not jit.we_are_jitted():
- unique_id = get_code_fn(*args)._vmprof_unique_id
- x = enter_code(unique_id)
- try:
- return func(*args)
- finally:
- leave_code(x)
- else:
+ unique_id = get_code_fn(*args)._vmprof_unique_id
+ x = enter_code(unique_id)
+ try:
return func(*args)
+ finally:
+ leave_code(x, unique_id)
decorated_function.__name__ = func.__name__ + '_rvmprof'
return decorated_function
From pypy.commits at gmail.com Sat Aug 6 11:12:07 2016
From: pypy.commits at gmail.com (fijal)
Date: Sat, 06 Aug 2016 08:12:07 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: expose some other
problems
Message-ID: <57a5fe47.031dc20a.af26a.1645@mx.google.com>
Author: fijal
Branch: improve-vmprof-testing
Changeset: r86047:8ba040124913
Date: 2016-08-06 17:11 +0200
http://bitbucket.org/pypy/pypy/changeset/8ba040124913/
Log: expose some other problems
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -23,13 +23,13 @@
llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
- driver = jit.JitDriver(greens=['code'], reds='auto')
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
class CodeObj(object):
def __init__(self, name):
self.name = name
- def get_code_fn(codes, code, arg):
+ def get_code_fn(codes, code, arg, c):
return code
def get_name(code):
@@ -40,21 +40,26 @@
@vmprof_execute_code("main", get_code_fn,
_hack_update_stack_untranslated=True)
- def f(codes, code, n):
+ def f(codes, code, n, c):
i = 0
while i < n:
- driver.jit_merge_point(code=code)
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
if code.name == "main":
- f(codes, codes[1], 1)
+ c = f(codes, codes[1], 1, c)
+ driver.can_enter_jit(code=code, c=c, i=i, codes=codes, n=n)
else:
llfn()
+ c -= 1
+ if c < 0:
+ llfn() # bridge
i += 1
+ return c
def main(n):
codes = [CodeObj("main"), CodeObj("not main")]
for code in codes:
register_code(code, get_name)
- return f(codes, codes[0], n)
+ return f(codes, codes[0], n, 8)
class Hooks(jit.JitHookInterface):
def after_compile(self, debug_info):
@@ -64,8 +69,9 @@
null = lltype.nullptr(cintf.VMPROFSTACK)
cintf.vmprof_tl_stack.setraw(null)
- self.meta_interp(main, [10], policy=JitPolicy(hooks))
+ self.meta_interp(main, [30], policy=JitPolicy(hooks), inline=True)
assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+ print visited
#v = set(visited)
#assert 0 in v
#v.remove(0)
From pypy.commits at gmail.com Sat Aug 6 13:29:40 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 10:29:40 -0700 (PDT)
Subject: [pypy-commit] cffi default: Update the version number to 1.8
Message-ID: <57a61e84.8bc71c0a.559d7.fbba@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2732:875dcaa2b519
Date: 2016-08-06 19:32 +0200
http://bitbucket.org/cffi/cffi/changeset/875dcaa2b519/
Log: Update the version number to 1.8
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c
--- a/c/_cffi_backend.c
+++ b/c/_cffi_backend.c
@@ -2,7 +2,7 @@
#include
#include "structmember.h"
-#define CFFI_VERSION "1.7.0"
+#define CFFI_VERSION "1.8.0"
#ifdef MS_WIN32
#include
diff --git a/c/test_c.py b/c/test_c.py
--- a/c/test_c.py
+++ b/c/test_c.py
@@ -12,7 +12,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.7.0", ("This test_c.py file is for testing a version"
+assert __version__ == "1.8.0", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
diff --git a/cffi/__init__.py b/cffi/__init__.py
--- a/cffi/__init__.py
+++ b/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.7.0"
-__version_info__ = (1, 7, 0)
+__version__ = "1.8.0"
+__version_info__ = (1, 8, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/cffi/_embedding.h b/cffi/_embedding.h
--- a/cffi/_embedding.h
+++ b/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.7.0"
+ "\ncompiled with cffi version: 1.8.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -45,9 +45,9 @@
# built documents.
#
# The short X.Y version.
-version = '1.7'
+version = '1.8'
# The full version, including alpha/beta/rc tags.
-release = '1.7.0'
+release = '1.8.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -51,13 +51,13 @@
Download and Installation:
-* http://pypi.python.org/packages/source/c/cffi/cffi-1.7.0.tar.gz
+* http://pypi.python.org/packages/source/c/cffi/cffi-1.8.0.tar.gz
- - MD5: 34122a545060cee58bab88feab57006d
+ - MD5: ...
- - SHA: d8033f34e17c0c51bb834b27f6e8c59fc24ae72c
+ - SHA: ...
- - SHA256: 6ed5dd6afd8361f34819c68aaebf9e8fc12b5a5893f91f50c9e50c8886bb60df
+ - SHA256: ...
* Or grab the most current version from the `Bitbucket page`_:
``hg clone https://bitbucket.org/cffi/cffi``
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -144,7 +144,7 @@
`Mailing list `_
""",
- version='1.7.0',
+ version='1.8.0',
packages=['cffi'] if cpython else [],
package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h',
'_embedding.h']}
From pypy.commits at gmail.com Sat Aug 6 13:33:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 10:33:39 -0700 (PDT)
Subject: [pypy-commit] pypy default: Update to cffi 1.8.0
Message-ID: <57a61f73.17a61c0a.a160.fc13@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86048:fa7bd4dae2a8
Date: 2016-08-06 19:35 +0200
http://bitbucket.org/pypy/pypy/changeset/fa7bd4dae2a8/
Log: Update to cffi 1.8.0
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.7.0
+Version: 1.8.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.7.0"
-__version_info__ = (1, 7, 0)
+__version__ = "1.8.0"
+__version_info__ = (1, 8, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -196,20 +196,6 @@
return NULL;
}
-_CFFI_UNUSED_FN
-static PyObject **_cffi_unpack_args(PyObject *args_tuple, Py_ssize_t expected,
- const char *fnname)
-{
- if (PyTuple_GET_SIZE(args_tuple) != expected) {
- PyErr_Format(PyExc_TypeError,
- "%.150s() takes exactly %zd arguments (%zd given)",
- fnname, expected, PyTuple_GET_SIZE(args_tuple));
- return NULL;
- }
- return &PyTuple_GET_ITEM(args_tuple, 0); /* pointer to the first item,
- the others follow */
-}
-
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.7.0"
+ "\ncompiled with cffi version: 1.8.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -275,6 +275,8 @@
def write_c_source_to_f(self, f, preamble):
self._f = f
prnt = self._prnt
+ if self.ffi._embedding is None:
+ prnt('#define Py_LIMITED_API')
#
# first the '#include' (actually done by inlining the file's content)
lines = self._rel_readlines('_cffi_include.h')
@@ -683,13 +685,11 @@
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
- prnt(' PyObject **aa;')
prnt()
- prnt(' aa = _cffi_unpack_args(args, %d, "%s");' % (len(rng), name))
- prnt(' if (aa == NULL)')
+ prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
+ name, len(rng), len(rng),
+ ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
- for i in rng:
- prnt(' arg%d = aa[%d];' % (i, i))
prnt()
#
for i, type in enumerate(tp.args):
@@ -862,6 +862,8 @@
enumfields = list(tp.enumfields())
for fldname, fldtype, fbitsize, fqual in enumfields:
fldtype = self._field_type(tp, fldname, fldtype)
+ self._check_not_opaque(fldtype,
+ "field '%s.%s'" % (tp.name, fldname))
# cname is None for _add_missing_struct_unions() only
op = OP_NOOP
if fbitsize >= 0:
@@ -911,6 +913,13 @@
first_field_index, c_fields))
self._seen_struct_unions.add(tp)
+ def _check_not_opaque(self, tp, location):
+ while isinstance(tp, model.ArrayType):
+ tp = tp.item
+ if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
+ raise TypeError(
+ "%s is of an opaque type (not declared in cdef())" % location)
+
def _add_missing_struct_unions(self):
# not very nice, but some struct declarations might be missing
# because they don't have any known C name. Check that they are
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi, entrypoint
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.7.0"
+VERSION = "1.8.0"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.7.0", ("This test_c.py file is for testing a version"
+assert __version__ == "1.8.0", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
@@ -130,7 +130,7 @@
cls.module = str(udir.join('testownlib.dll'))
else:
subprocess.check_call(
- 'gcc testownlib.c -shared -fPIC -o testownlib.so',
+ 'cc testownlib.c -shared -fPIC -o testownlib.so',
cwd=str(udir), shell=True)
cls.module = str(udir.join('testownlib.so'))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -852,9 +852,12 @@
assert str(e2.value) == "foo0() takes no arguments (2 given)"
assert str(e3.value) == "foo1() takes exactly one argument (0 given)"
assert str(e4.value) == "foo1() takes exactly one argument (2 given)"
- assert str(e5.value) == "foo2() takes exactly 2 arguments (0 given)"
- assert str(e6.value) == "foo2() takes exactly 2 arguments (1 given)"
- assert str(e7.value) == "foo2() takes exactly 2 arguments (3 given)"
+ assert str(e5.value) in ["foo2 expected 2 arguments, got 0",
+ "foo2() takes exactly 2 arguments (0 given)"]
+ assert str(e6.value) in ["foo2 expected 2 arguments, got 1",
+ "foo2() takes exactly 2 arguments (1 given)"]
+ assert str(e7.value) in ["foo2 expected 2 arguments, got 3",
+ "foo2() takes exactly 2 arguments (3 given)"]
def test_address_of_function():
ffi = FFI()
@@ -1916,3 +1919,35 @@
ffi.cdef("bool f(void);")
lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
assert lib.f() == 1
+
+def test_struct_field_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[2]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+
+def test_function_arg_opaque():
+ py.test.skip("can currently declare a function with an opaque struct "
+ "as argument, but AFAICT it's impossible to call it later")
+
+def test_function_returns_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a foo(int);")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_function_returns_opaque", "?")
+ assert str(e.value) == ("function foo: 'struct a' is used as result type,"
+ " but is opaque")
From pypy.commits at gmail.com Sat Aug 6 14:17:47 2016
From: pypy.commits at gmail.com (mattip)
Date: Sat, 06 Aug 2016 11:17:47 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: suggest some tweaks,
feel free to revert
Message-ID: <57a629cb.8cc51c0a.2ceb1.0ca2@mx.google.com>
Author: Matti Picus
Branch: extradoc
Changeset: r5664:d5eaf9f670d2
Date: 2016-08-06 21:17 +0300
http://bitbucket.org/pypy/extradoc/changeset/d5eaf9f670d2/
Log: suggest some tweaks, feel free to revert
diff --git a/blog/draft/new-jit-log.rst b/blog/draft/new-jit-log.rst
--- a/blog/draft/new-jit-log.rst
+++ b/blog/draft/new-jit-log.rst
@@ -2,38 +2,51 @@
=======
We are happy to announce a major JitViewer (JV) update.
-JV allows you to inspect PyPy's internal compiler representation including the generated machine code of your program.
-A useful tool to spot issues in your program and learn PyPy's compiler details.
+JV allows you to inspect RPython's internal compiler representation (the language in which PyPy is implemented)
+including the generated machine code of your program.
+It can graphically show you details of the RPython compiled code and helps you pinpoint issues in your code.
-VMProf is a statistical cpu profiler imposing very little overhead at runtime.
+VMProf is a statistical CPU profiler for python imposing very little overhead at runtime.
-Both VMProf and JitViewer share a common goal: Present useful information for your Python program.
-The combination of both might reveal more information. That is the reason why they are now both packaged together.
-www.vmprof.com also got updated with various bugfixes and changes including an all new interface to JV.
+Both VMProf and JitViewer share a common goal: Present useful information for your python program.
+The combination of both can reveal more information than either alone.
+That is the reason why they are now both packaged together.
+We also updated www.vmprof.com with various bug fixes and changes including an all new interface to JV.
-An advertisment: We constantly improve tooling and libraries around the Python/PyPy eco system.
-Here are a four examples you might also want to use in your Python projects:
+This work was done with the goal of improving tooling and libraries around the Python/PyPy/RPython ecosystem.
+Some of the tools we have developed:
+
+* CFFI - Foreign Function Interface that avoids CPyExt (http://cffi.readthedocs.io/en/latest/)
+* RevDB - A reverse debugger for python (https://morepypy.blogspot.co.at/2016/07/reverse-debugging-for-python.html)
+
+and of course the tools we discuss here:
* VMProf - A statistical CPU profiler (http://vmprof.readthedocs.io/en/latest/)
-* RevDB - A reverse debugger for Python (https://morepypy.blogspot.co.at/2016/07/reverse-debugging-for-python.html)
-* CFFI - Foreign Function Interface that avoids CPyExt (http://cffi.readthedocs.io/en/latest/)
-* JitViewer - Visualization of the log file produced by PyPy (http://vmprof.readthedocs.io/en/latest/)
+* JitViewer - Visualization of the log file produced by RPython (http://vmprof.readthedocs.io/en/latest/)
A "brand new" JitViewer
---------------------
-The old logging format was a hard to maintain plain text logging facility. Frequent changes often broke internal tools. Additionaly the logging output of a long running program took a lot of disk space.
+JitViewer has two pieces: you create a log file when running your program, and then use a graphic tool to view what happened.
-Our new binary format encodes data densly, makes use of some compression (gzip) and tries to remove repetition where possible. On top of that it supports versioning and can be extended easily. And *drumroll* you do not need to install JV yourself anymore! The whole system moved to vmprof.com and you can use it any time.
+The old logging format was a hard-to-maintain, plain-text-logging facility. Frequent changes often broke internal tools.
+Additionally, the logging output of a long running program required a lot of disk space.
-Sounds great. But what can you do with it? Here are two examples useful for a PyPy user:
+Our new binary format encodes data densely, makes use of some compression (gzip), and tries to remove repetition where possible.
+It also supports versioning for future proofing and can be extended easily.
+
+And *drumroll* you no longer need to install a tool to view the log
+yourself! The whole system moved to vmprof.com and you can use it any time.
+
+Sounds great. But what can you do with it? Here are two examples for a PyPy user:
PyPy crashed? Did you discover a bug?
-------------------
-For some hard to find bugs it is often necessary to look at the compiled code. The old procedure often required to upload a plain text file which was hard to parse and to look through.
+For some hard to find bugs it is often necessary to look at the compiled code. The old
+procedure often required you to upload a plain text file which was hard to parse and to look through.
-A new way to share a crash report is to install the ``vmprof`` module from PyPi and execute either of the two commands:
+A better way to share a crash report is to install the ``vmprof`` module from PyPI and execute either of the two commands:
```
# this program does not crash, but has some weird behaviour
@@ -48,12 +61,12 @@
PyPy Jitlog: http://vmprof.com/#//traces
```
-Providing the link in the bug report enables PyPy developers browse and identify potential issues.
+Providing the link in the bug report allows PyPy developers to browse and identify potential issues.
Speed issues
------------
-VMProf is a great tool to find out hot spots that consume a lot of time in your program. As soon as you have idenified code that runs slow, you can switch to jitlog and maybe pin point certain aspects that do not behave as expected. You will find not only the overview, but are also able to browse the generated code. If you cannot make sense of that all you can just share the link with us and we can have a look at too.
+VMProf is a great tool to find out hot spots that consume a lot of time in your program. As soon as you have identified code that runs slowly, you can switch to jitlog and maybe pinpoint certain aspects that do not behave as expected. You will find an overview, and are able to browse the generated code. If you cannot make sense of all that, you can just share the link with us and we can have a look too.
Future direction
----------------
@@ -62,11 +75,11 @@
Here are a few ideas what might come in the next few releases:
-* Combination of CPU profiles and the JITLOG (Sadly did not make it into the current release)
+* Combination of CPU profiles and the JITLOG (sadly did not make it into the current release).
* Extend vmprof.com to be able to query vmprof/jitlog. An example query for vmprof: 'methods.callsites() > 5' and for the jitlog would be 'traces.contains('call_assembler').hasbridge('*my_func_name*')'.
-* Extend the jitlog to capture the information of the optimization stage
+* Extend the jitlog to capture the information of the optimization stage.
Richard Plangger (plan_rich) and the PyPy team
From pypy.commits at gmail.com Sat Aug 6 14:24:54 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 11:24:54 -0700 (PDT)
Subject: [pypy-commit] cffi default: Seems that as a #define,
this hack breaks if a future header does
Message-ID: <57a62b76.c70a1c0a.ea799.0509@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2733:6ddf5a9d1409
Date: 2016-08-06 20:27 +0200
http://bitbucket.org/cffi/cffi/changeset/6ddf5a9d1409/
Log: Seems that as a #define, this hack breaks if a future header does
"typedef bool _Bool;". With a typedef, though, we get two identical
typedefs, which at least g++ is perfectly happy with
diff --git a/cffi/_cffi_include.h b/cffi/_cffi_include.h
--- a/cffi/_cffi_include.h
+++ b/cffi/_cffi_include.h
@@ -59,7 +59,7 @@
#ifdef __cplusplus
# ifndef _Bool
-# define _Bool bool /* semi-hackish: C++ has no _Bool; bool is builtin */
+ typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
From pypy.commits at gmail.com Sat Aug 6 14:41:00 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Sat, 06 Aug 2016 11:41:00 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Fix test for build_set_unpack,
add test for build_map_unpack_with_call, fix getFuncDesc
Message-ID: <57a62f3c.4bc41c0a.32732.0e98@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86049:7f49d892fb4d
Date: 2016-08-06 20:40 +0200
http://bitbucket.org/pypy/pypy/changeset/7f49d892fb4d/
Log: Fix test for build_set_unpack, add test for
build_map_unpack_with_call, fix getFuncDesc
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1374,10 +1374,10 @@
w_sum = self.unpack_helper(itemcount, next_instr)
self.pushvalue(self.space.newlist(w_sum))
- def getFuncDesc(func):
- if self.space.type(aaa).name.decode('utf-8') == 'method':
+ def getFuncDesc(self, func):
+ if self.space.type(func).name.decode('utf-8') == 'method':
return "()"
- elif self.space.type(aaa).name.decode('utf-8') == 'function':
+ elif self.space.type(func).name.decode('utf-8') == 'function':
return "()"
else:
return " object";
@@ -1403,7 +1403,7 @@
err_fun = self.peekvalue(num_maps + function_location-1)
err_arg = self.space.unicode_w(keys.getitem(j))
raise oefmt(self.space.w_TypeError,
- "%N%s got multiple values for keyword argument %s", err_fun, getFuncDesc(err_fun), err_arg)
+ "%N%s got multiple values for keyword argument '%s'", err_fun, self.getFuncDesc(err_fun), err_arg)
self.space.call_method(w_dict, 'update', w_item)
while num_maps != 0:
self.popvalue()
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -261,7 +261,10 @@
code = """ def f():
return {*range(4), 4, *(5, 6, 7)}
"""
- assert self.codetest(code, "f", []) == {0, 1, 2, 3, 4, 5, 6, 7}
+ space = self.space
+ res = self.codetest(code, "f", [])
+ l_res = space.call_function(space.w_list, res)
+ assert space.unwrap(l_res) == [0, 1, 2, 3, 4, 5, 6, 7]
def test_build_tuple_unpack(self):
code = """ def f():
@@ -286,6 +289,30 @@
res = self.codetest(code, 'g', [])
assert "TypeError:" in res
assert "'tuple' object is not a mapping" in res
+
+ def test_build_map_unpack_with_call(self):
+ code = """
+ def f(a,b,c,d):
+ return a+b,c+d
+ def g1():
+ return f(**{'a': 1, 'c': 3}, **{'b': 2, 'd': 4})
+ def g2():
+ return f(**{'a': 1, 'c': 3}, **[])
+ def g3():
+ return f(**{'a': 1, 'c': 3}, **{1: 3})
+ def g4():
+ return f(**{'a': 1, 'c': 3}, **{'a': 2})
+ """
+ assert self.codetest(code, "g1", []) == (3, 7)
+ resg2 = self.codetest(code, 'g2', [])
+ assert "TypeError:" in resg2
+ assert "'list' object is not a mapping" in resg2
+ resg3 = self.codetest(code, 'g3', [])
+ assert "TypeError:" in resg3
+ assert "keywords must be strings" in resg3
+ resg4 = self.codetest(code, 'g4', [])
+ assert "TypeError:" in resg4
+ assert "f() got multiple values for keyword argument 'a'" in resg4
class AppTestInterpreter:
From pypy.commits at gmail.com Sat Aug 6 14:49:53 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 11:49:53 -0700 (PDT)
Subject: [pypy-commit] pypy default: Import cffi/6ddf5a9d1409
Message-ID: <57a63151.45c8c20a.3d264.b421@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86050:67a7286bfdd2
Date: 2016-08-06 20:49 +0200
http://bitbucket.org/pypy/pypy/changeset/67a7286bfdd2/
Log: Import cffi/6ddf5a9d1409
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -59,7 +59,7 @@
#ifdef __cplusplus
# ifndef _Bool
-# define _Bool bool /* semi-hackish: C++ has no _Bool; bool is builtin */
+ typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
From pypy.commits at gmail.com Sat Aug 6 15:02:43 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 12:02:43 -0700 (PDT)
Subject: [pypy-commit] cffi default: Update docs
Message-ID: <57a63453.4317c20a.5ff9b.5d62@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2734:afc6e9c21056
Date: 2016-08-06 21:05 +0200
http://bitbucket.org/cffi/cffi/changeset/afc6e9c21056/
Log: Update docs
diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst
--- a/doc/source/cdef.rst
+++ b/doc/source/cdef.rst
@@ -837,8 +837,8 @@
.. __: distutils-setuptools_
The following example should work both with old (pre-1.0) and new
-versions of CFFI---supporting both is important to run on PyPy,
-because CFFI 1.0 does not work in PyPy < 2.6:
+versions of CFFI---supporting both is important to run on old
+versions of PyPy (CFFI 1.0 does not work in PyPy < 2.6):
.. code-block:: python
diff --git a/doc/source/ref.rst b/doc/source/ref.rst
--- a/doc/source/ref.rst
+++ b/doc/source/ref.rst
@@ -627,12 +627,12 @@
*`` argument might be passed as ``[[x, y]]`` or ``[{'x': 5, 'y':
10}]``.
- As an optimization, the CPython version of CFFI assumes that a
+ As an optimization, CFFI assumes that a
function with a ``char *`` argument to which you pass a Python
string will not actually modify the array of characters passed in,
and so passes directly a pointer inside the Python string object.
- (PyPy might in the future do the same, but it is harder because
- strings are not naturally zero-terminated in PyPy.)
+ (On PyPy, this optimization is only available since PyPy 5.4
+ with CFFI 1.8.)
`(**)` C function calls are done with the GIL released.
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst
--- a/doc/source/whatsnew.rst
+++ b/doc/source/whatsnew.rst
@@ -7,7 +7,14 @@
====
* Removed the restriction that ``ffi.from_buffer()`` cannot be used on
- byte strings (PyPy was improved and can now support that case).
+ byte strings. Now you can get a ``char *`` out of a byte string,
+ which is valid as long as the string object is kept alive. (But
+ don't use it to *modify* the string object! If you need this, use
+ ``bytearray`` or other official techniques.)
+
+* PyPy 5.4 can now pass a byte string directly to a ``char *``
+ argument (in older versions, a copy would be made). This used to be
+ a CPython-only optimization.
v1.7
From pypy.commits at gmail.com Sat Aug 6 16:13:57 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 13:13:57 -0700 (PDT)
Subject: [pypy-commit] cffi default: Avoid a combination where we end up
with two incompatible "typedef xxx
Message-ID: <57a64505.03121c0a.f3fb7.2b71@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2735:d9c892b5a64b
Date: 2016-08-06 22:16 +0200
http://bitbucket.org/cffi/cffi/changeset/d9c892b5a64b/
Log: Avoid a combination where we end up with two incompatible "typedef
xxx _Bool;"
diff --git a/cffi/_cffi_include.h b/cffi/_cffi_include.h
--- a/cffi/_cffi_include.h
+++ b/cffi/_cffi_include.h
@@ -42,7 +42,9 @@
#  include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
- typedef unsigned char _Bool;
+# ifndef __cplusplus
+ typedef unsigned char _Bool;
+# endif
# endif
#else
# include <stdint.h>
diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py
--- a/testing/cffi1/test_recompiler.py
+++ b/testing/cffi1/test_recompiler.py
@@ -1919,6 +1919,18 @@
lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
assert lib.f() == 1
+def test_bool_in_cpp_2():
+ ffi = FFI()
+ ffi.cdef('int add(int a, int b);')
+ lib = verify(ffi, "test_bool_bug_cpp", '''
+ typedef bool _Bool; /* there is a Windows header with this line */
+ int add(int a, int b)
+ {
+ return a + b;
+ }''', source_extension='.cpp')
+ c = lib.add(2, 3)
+ assert c == 5
+
def test_struct_field_opaque():
ffi = FFI()
ffi.cdef("struct a { struct b b; };")
From pypy.commits at gmail.com Sat Aug 6 16:15:13 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 06 Aug 2016 13:15:13 -0700 (PDT)
Subject: [pypy-commit] pypy default: update cffi/d9c892b5a64b
Message-ID: <57a64551.919a1c0a.d74cb.2be9@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86051:68c75f95713b
Date: 2016-08-06 22:17 +0200
http://bitbucket.org/pypy/pypy/changeset/68c75f95713b/
Log: update cffi/d9c892b5a64b
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -42,7 +42,9 @@
#  include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
- typedef unsigned char _Bool;
+# ifndef __cplusplus
+ typedef unsigned char _Bool;
+# endif
# endif
#else
# include <stdint.h>
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -1920,6 +1920,18 @@
lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
assert lib.f() == 1
+def test_bool_in_cpp_2():
+ ffi = FFI()
+ ffi.cdef('int add(int a, int b);')
+ lib = verify(ffi, "test_bool_bug_cpp", '''
+ typedef bool _Bool; /* there is a Windows header with this line */
+ int add(int a, int b)
+ {
+ return a + b;
+ }''', source_extension='.cpp')
+ c = lib.add(2, 3)
+ assert c == 5
+
def test_struct_field_opaque():
ffi = FFI()
ffi.cdef("struct a { struct b b; };")
From pypy.commits at gmail.com Sun Aug 7 03:30:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 00:30:39 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: in-progress
Message-ID: <57a6e39f.a717c20a.155f5.0a4c@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86052:271af761ce0b
Date: 2016-08-07 09:32 +0200
http://bitbucket.org/pypy/pypy/changeset/271af761ce0b/
Log: in-progress
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -7,7 +7,73 @@
from rpython.jit.backend.x86.arch import WORD
from rpython.jit.codewriter.policy import JitPolicy
+
class BaseRVMProfTest(object):
+
+ def setup_method(self, meth):
+ visited = []
+
+ def helper():
+ trace = []
+ stack = cintf.vmprof_tl_stack.getraw()
+ while stack:
+ trace.append((stack.c_kind, stack.c_value))
+ stack = stack.c_next
+ visited.append(trace)
+
+ llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
+
+ class CodeObj(object):
+ def __init__(self, name):
+ self.name = name
+
+ def get_code_fn(codes, code, arg, c):
+ return code
+
+ def get_name(code):
+ return "foo"
+
+ _get_vmprof().use_weaklist = False
+ register_code_object_class(CodeObj, get_name)
+
+ self.misc = visited, llfn, CodeObj, get_code_fn, get_name
+
+
+ def teardown_method(self, meth):
+ del _get_vmprof().use_weaklist
+
+
+ def test_simple(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while i < n:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ c = f(codes, codes[1], 1, c)
+ driver.can_enter_jit(code=code, c=c, i=i, codes=codes, n=n)
+ else:
+ llfn()
+ c -= 1
+ i += 1
+ return c
+
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ return f(codes, codes[0], n, 8)
+
+ null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
def test_one(self):
# py.test.skip("needs thread-locals in the JIT, which is only available "
# "after translation")
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -2082,20 +2082,10 @@
raise NotImplementedError(oopspec_name)
def _handle_rvmprof_call(self, op, oopspec_name, args):
- if oopspec_name == 'rvmprof.enter_code':
- leaving = 0
- elif oopspec_name == 'rvmprof.leave_code':
- leaving = 1
- else:
+ if oopspec_name != 'rvmprof.code':
raise NotImplementedError(oopspec_name)
- c_leaving = Constant(leaving, lltype.Signed)
- v_uniqueid = op.args[-1]
- ops = [SpaceOperation('rvmprof_code', [c_leaving, v_uniqueid], None)]
- if op.result.concretetype is not lltype.Void:
- c_null = Constant(lltype.nullptr(op.result.concretetype.TO),
- op.result.concretetype)
- ops.append(c_null)
- return ops
+ c_leaving, v_uniqueid = args
+ return SpaceOperation('rvmprof_code', [c_leaving, v_uniqueid], None)
def rewrite_op_ll_read_timestamp(self, op):
op1 = self.prepare_builtin_call(op, "ll_read_timestamp", [])
diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py
--- a/rpython/jit/codewriter/test/test_flatten.py
+++ b/rpython/jit/codewriter/test/test_flatten.py
@@ -14,7 +14,7 @@
from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_longlong, r_ulonglong
from rpython.rlib.jit import dont_look_inside, _we_are_jitted, JitDriver
from rpython.rlib.objectmodel import keepalive_until_here
-from rpython.rlib import jit
+from rpython.rlib import jit, debug
class FakeRegAlloc:
@@ -1115,14 +1115,26 @@
from rpython.rlib.rvmprof import cintf
class MyFakeCallControl(FakeCallControl):
def guess_call_kind(self, op):
- return 'builtin'
+ if '_code' in repr(op):
+ return 'builtin'
+ return 'residual'
+ class X:
+ pass
+ def g():
+ debug.debug_print("foo")
+ return X()
+ g._dont_inline_ = True
def f(x):
- s = cintf.enter_code(x)
- cintf.leave_code(s, x)
+ cintf.jit_rvmprof_code(0, x)
+ res = g()
+ cintf.jit_rvmprof_code(1, x)
+ return res
self.encoding_test(f, [42], """
rvmprof_code $0, %i0
+ residual_call_r_r $<* fn g>, R[], -> %r0
+ -live-
rvmprof_code $1, %i0
- void_return
+ ref_return %r0
""", transform=True, cc=MyFakeCallControl())
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -74,6 +74,8 @@
self.parent_snapshot = None
# counter for unrolling inlined loops
self.unroll_iterations = 1
+ # rvmprof
+ self.rvmprof_unique_id = -1
@specialize.arg(3)
def copy_constants(self, registers, constants, ConstClass):
@@ -1456,11 +1458,7 @@
@arguments("int", "box")
def opimpl_rvmprof_code(self, leaving, box_unique_id):
from rpython.rlib.rvmprof import cintf
- unique_id = box_unique_id.getint()
- if not leaving:
- cintf.enter_code(unique_id)
- else:
- cintf.leave_code_check(unique_id)
+ cintf.jit_rvmprof_code(leaving, box_unique_id.getint())
# ------------------------------
@@ -1813,6 +1811,7 @@
opimpl = _get_opimpl_method(name, argcodes)
self.opcode_implementations[value] = opimpl
self.op_catch_exception = insns.get('catch_exception/L', -1)
+ self.op_rvmprof_code = insns.get('rvmprof_code/ii', -1)
def setup_descrs(self, descrs):
self.opcode_descrs = descrs
@@ -2080,6 +2079,10 @@
target = ord(code[position+1]) | (ord(code[position+2])<<8)
frame.pc = target
raise ChangeFrame
+ if opcode == self.staticdata.op_rvmprof_code:
+ # do the 'leave_code' for rvmprof, but then continue
+ # popping frames
+ import pdb;pdb.set_trace()
self.popframe()
try:
self.compile_exit_frame_with_exception(self.last_exc_box)
diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -86,22 +86,6 @@
ExternalCompilationInfo(includes=['vmprof_stack.h'],
include_dirs = [SRC]))
-# JIT notes:
-#
-# - When running JIT-generated assembler code, we have different custom
-# code to build the VMPROFSTACK, so the functions below are not used.
-#
-# - The jitcode for decorated_function() in rvmprof.py still contains
-# calls to these two oopspec functions, which are represented with
-# the 'rvmprof_code' jitcode opcode.
-#
-# - When meta-interpreting, the 'rvmprof_code' opcode causes pyjitpl
-# to call enter_code()/leave_code_check(), but otherwise
-# 'rvmprof_code' is ignored, i.e. doesn't produce any resop.
-#
-# - Blackhole: ...
-
- at jit.oopspec("rvmprof.enter_code(unique_id)")
def enter_code(unique_id):
do_use_eci()
s = lltype.malloc(VMPROFSTACK, flavor='raw')
@@ -111,12 +95,52 @@
vmprof_tl_stack.setraw(s)
return s
- at jit.oopspec("rvmprof.leave_code(s, unique_id)")
-def leave_code(s, unique_id):
+def leave_code(s):
vmprof_tl_stack.setraw(s.c_next)
lltype.free(s, flavor='raw')
-def leave_code_check(unique_id):
- s = vmprof_tl_stack.getraw()
- assert s.c_value == unique_id
- leave_code(s, unique_id)
+#
+# JIT notes:
+#
+# - When running JIT-generated assembler code, we have different custom
+# code to build the VMPROFSTACK, so the functions above are not used.
+# (It uses kind == VMPROF_JITTED_TAG and the VMPROFSTACK is allocated
+# in the C stack.)
+#
+# - The jitcode for decorated_jitted_function() in rvmprof.py, if
+# we_are_jitted() calls the oopspec'ed function jit_rvmprof_code(),
+# which turns into a simple jitcode opcode. The jitcode has a
+# simple structure:
+#
+# rvmprof_code(0, unique_id)
+# res = inline_call FUNC
+# rvmprof_code(1, unique_id)
+#
+# with no catch_exception logic for a "finally:" block. Instead the
+# blackhole interp looks for this simple pattern. This is needed
+# because, when a guard fails, the blackhole interp first rebuilds
+# all the intermediate RPython frames; at that point it needs to
+# call enter_code() on all intermediate RPython frames, so it does
+# pattern matching to recognize frames and learn about unique_id.
+#
+# - The jitcode opcode 'rvmprof_code' doesn't produce any resop. When
+# meta-interpreting, it causes pyjitpl to call jit_enter_code(), and
+# jit_leave_code(). There is logic to call jit_leave_code() even if
+# we exit with an exception, even though there is no
+# 'catch_exception'.
+#
+# - When blackholing, the call to jit_enter_code() occurs immediately
+# as described above. For calling jit_leave_code(), we use the same
+# logic, detecting when we need to call it even though there is no
+# 'catch_exception'.
+
+ at jit.oopspec("rvmprof.code(leaving, unique_id)")
+def jit_rvmprof_code(leaving, unique_id):
+ """Marker for the JIT. Also called directly from the metainterp and
+ the blackhole interp."""
+ if not leaving:
+ enter_code(unique_id) # ignore the return value
+ else:
+ s = vmprof_tl_stack.getraw()
+ assert s.c_value == unique_id
+ leave_code(s)
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -1,6 +1,6 @@
import sys, os
from rpython.rlib.objectmodel import specialize, we_are_translated
-from rpython.rlib import rposix
+from rpython.rlib import jit, rposix
from rpython.rlib.rvmprof import cintf
from rpython.rtyper.annlowlevel import cast_instance_to_gcref
from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
@@ -162,19 +162,12 @@
"""
if _hack_update_stack_untranslated:
from rpython.rtyper.annlowlevel import llhelper
- from rpython.rlib import jit
- enter_code_untr = llhelper(lltype.Ptr(
+ enter_code = llhelper(lltype.Ptr(
lltype.FuncType([lltype.Signed], cintf.PVMPROFSTACK)),
cintf.enter_code)
- leave_code_untr = llhelper(lltype.Ptr(
- lltype.FuncType([cintf.PVMPROFSTACK, lltype.Signed], lltype.Void)),
+ leave_code = llhelper(lltype.Ptr(
+ lltype.FuncType([cintf.PVMPROFSTACK], lltype.Void)),
cintf.leave_code)
- @jit.oopspec("rvmprof.enter_code(unique_id)")
- def enter_code(unique_id):
- return enter_code_untr(unique_id)
- @jit.oopspec("rvmprof.leave_code(s)")
- def leave_code(s, unique_id):
- leave_code_untr(s, unique_id)
else:
enter_code = cintf.enter_code
leave_code = cintf.leave_code
@@ -185,13 +178,24 @@
except cintf.VMProfPlatformUnsupported:
return func
+ def decorated_jitted_function(unique_id, *args):
+ cintf.jit_rvmprof_code(0, unique_id)
+ res = func(*args)
+ cintf.jit_rvmprof_code(1, unique_id) # no 'finally:', see cintf.py
+ return res
+ decorated_jitted_function._dont_inline_ = True
+
def decorated_function(*args):
unique_id = get_code_fn(*args)._vmprof_unique_id
- x = enter_code(unique_id)
- try:
- return func(*args)
- finally:
- leave_code(x, unique_id)
+ if not jit.we_are_jitted():
+ x = enter_code(unique_id)
+ try:
+ return func(*args)
+ finally:
+ leave_code(x)
+ else:
+ return decorated_jitted_function(unique_id, *args)
+ decorated_function._always_inline_ = True
decorated_function.__name__ = func.__name__ + '_rvmprof'
return decorated_function
From pypy.commits at gmail.com Sun Aug 7 04:44:03 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 01:44:03 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: in-progress
Message-ID: <57a6f4d3.11051c0a.c5268.d5f9@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86053:2de09f51a345
Date: 2016-08-07 10:46 +0200
http://bitbucket.org/pypy/pypy/changeset/2de09f51a345/
Log: in-progress
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -55,7 +55,6 @@
driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
if code.name == "main":
c = f(codes, codes[1], 1, c)
- driver.can_enter_jit(code=code, c=c, i=i, codes=codes, n=n)
else:
llfn()
c -= 1
@@ -74,6 +73,46 @@
assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+ def test_leaving_with_exception(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ class MyExc(Exception):
+ def __init__(self, c):
+ self.c = c
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while i < n:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ try:
+ f(codes, codes[1], 1, c)
+ except MyExc as e:
+ c = e.c
+ else:
+ llfn()
+ c -= 1
+ i += 1
+ raise MyExc(c)
+
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ try:
+ f(codes, codes[0], n, 8)
+ except MyExc as e:
+ return e.c
+
+ null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
def test_one(self):
# py.test.skip("needs thread-locals in the JIT, which is only available "
# "after translation")
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -2082,10 +2082,30 @@
raise NotImplementedError(oopspec_name)
def _handle_rvmprof_call(self, op, oopspec_name, args):
- if oopspec_name != 'rvmprof.code':
+ if oopspec_name != 'rvmprof.jitted':
raise NotImplementedError(oopspec_name)
- c_leaving, v_uniqueid = args
- return SpaceOperation('rvmprof_code', [c_leaving, v_uniqueid], None)
+ c_entering = Constant(0, lltype.Signed)
+ c_leaving = Constant(1, lltype.Signed)
+ v_uniqueid = args[0]
+ op1 = SpaceOperation('rvmprof_code', [c_entering, v_uniqueid], None)
+ op2 = SpaceOperation('rvmprof_code', [c_leaving, v_uniqueid], None)
+ #
+ # fish fish inside the oopspec's graph for the ll_func pointer
+ block = op.args[0].value._obj.graph.startblock
+ while True:
+ assert len(block.exits) == 1
+ nextblock = block.exits[0].target
+ if nextblock.operations == ():
+ break
+ block = nextblock
+ last_op = block.operations[-1]
+ assert last_op.opname == 'direct_call'
+ c_ll_func = last_op.args[0]
+ #
+ args = [c_ll_func] + op.args[2:]
+ ops = self.rewrite_op_direct_call(SpaceOperation('direct_call',
+ args, op.result))
+ return [op1] + ops + [op2]
def rewrite_op_ll_read_timestamp(self, op):
op1 = self.prepare_builtin_call(op, "ll_read_timestamp", [])
diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py
--- a/rpython/jit/codewriter/test/test_flatten.py
+++ b/rpython/jit/codewriter/test/test_flatten.py
@@ -1115,23 +1115,22 @@
from rpython.rlib.rvmprof import cintf
class MyFakeCallControl(FakeCallControl):
def guess_call_kind(self, op):
- if '_code' in repr(op):
+ if 'jitted' in repr(op):
return 'builtin'
return 'residual'
class X:
pass
- def g():
+ def g(x, y):
debug.debug_print("foo")
return X()
- g._dont_inline_ = True
- def f(x):
- cintf.jit_rvmprof_code(0, x)
- res = g()
- cintf.jit_rvmprof_code(1, x)
- return res
- self.encoding_test(f, [42], """
+ @jit.oopspec("rvmprof.jitted(unique_id)")
+ def decorated_jitted_function(unique_id, *args):
+ return g(*args)
+ def f(id, x, y):
+ return decorated_jitted_function(id, x, y)
+ self.encoding_test(f, [42, 56, 74], """
rvmprof_code $0, %i0
- residual_call_r_r $<* fn g>, R[], -> %r0
+ residual_call_ir_r $<* fn g>, I[%i1, %i2], R[], -> %r0
-live-
rvmprof_code $1, %i0
ref_return %r0
diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -7,6 +7,7 @@
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rlib import rthread, jit
+from rpython.rlib.objectmodel import we_are_translated
class VMProfPlatformUnsupported(Exception):
pass
@@ -96,6 +97,8 @@
return s
def leave_code(s):
+ if not we_are_translated():
+ assert vmprof_tl_stack.getraw() == s
vmprof_tl_stack.setraw(s.c_next)
lltype.free(s, flavor='raw')
@@ -107,40 +110,35 @@
# (It uses kind == VMPROF_JITTED_TAG and the VMPROFSTACK is allocated
# in the C stack.)
#
-# - The jitcode for decorated_jitted_function() in rvmprof.py, if
-# we_are_jitted() calls the oopspec'ed function jit_rvmprof_code(),
-# which turns into a simple jitcode opcode. The jitcode has a
-# simple structure:
+# - The jitcode for decorated_jitted_function() in rvmprof.py is
+# special-cased by jtransform.py to produce this:
#
# rvmprof_code(0, unique_id)
-# res = inline_call FUNC
+# res = inline_call FUNC <- for func(*args)
# rvmprof_code(1, unique_id)
+# return res
#
-# with no catch_exception logic for a "finally:" block. Instead the
-# blackhole interp looks for this simple pattern. This is needed
-# because, when a guard fails, the blackhole interp first rebuilds
-# all the intermediate RPython frames; at that point it needs to
-# call enter_code() on all intermediate RPython frames, so it does
-# pattern matching to recognize frames and learn about unique_id.
+# There is no 'catch_exception', but the second 'rvmprof_code' is
+# meant to be executed even in case there was an exception. This is
+# done by a special case in pyjitpl.py and blackhole.py. The point
+# is that the above simple pattern can be detected by the blackhole
+# interp, when it first rebuilds all the intermediate RPython
+# frames; at that point it needs to call jit_enter_code() on all
+# intermediate RPython frames, so it does pattern matching to
+# recognize when it must call that and with which 'unique_id' value.
#
# - The jitcode opcode 'rvmprof_code' doesn't produce any resop. When
-# meta-interpreting, it causes pyjitpl to call jit_enter_code(), and
-# jit_leave_code(). There is logic to call jit_leave_code() even if
-# we exit with an exception, even though there is no
-# 'catch_exception'.
-#
-# - When blackholing, the call to jit_enter_code() occurs immediately
-# as described above. For calling jit_leave_code(), we use the same
-# logic, detecting when we need to call it even though there is no
-# 'catch_exception'.
+# meta-interpreting, it causes pyjitpl to call jit_enter_code() or
+# jit_leave_code(). As mentioned above, there is logic to call
+# jit_leave_code() even if we exit with an exception, even though
+# there is no 'catch_exception'. There is similar logic inside
+# the blackhole interpreter.
- at jit.oopspec("rvmprof.code(leaving, unique_id)")
-def jit_rvmprof_code(leaving, unique_id):
- """Marker for the JIT. Also called directly from the metainterp and
- the blackhole interp."""
- if not leaving:
- enter_code(unique_id) # ignore the return value
- else:
- s = vmprof_tl_stack.getraw()
- assert s.c_value == unique_id
- leave_code(s)
+
+def jit_enter_code(unique_id):
+ enter_code(unique_id) # ignore the return value
+
+def jit_leave_code(unique_id):
+ s = vmprof_tl_stack.getraw()
+ assert s.c_value == unique_id
+ leave_code(s)
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -178,12 +178,9 @@
except cintf.VMProfPlatformUnsupported:
return func
+ @jit.oopspec("rvmprof.jitted(unique_id)")
def decorated_jitted_function(unique_id, *args):
- cintf.jit_rvmprof_code(0, unique_id)
- res = func(*args)
- cintf.jit_rvmprof_code(1, unique_id) # no 'finally:', see cintf.py
- return res
- decorated_jitted_function._dont_inline_ = True
+ return func(*args)
def decorated_function(*args):
unique_id = get_code_fn(*args)._vmprof_unique_id
@@ -195,7 +192,6 @@
leave_code(x)
else:
return decorated_jitted_function(unique_id, *args)
- decorated_function._always_inline_ = True
decorated_function.__name__ = func.__name__ + '_rvmprof'
return decorated_function
From pypy.commits at gmail.com Sun Aug 7 04:51:21 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 01:51:21 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: Finish the pyjitpl part
Message-ID: <57a6f689.56421c0a.10d91.cee2@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86054:e39f9543e5b0
Date: 2016-08-07 10:53 +0200
http://bitbucket.org/pypy/pypy/changeset/e39f9543e5b0/
Log: Finish the pyjitpl part
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -2080,9 +2080,14 @@
frame.pc = target
raise ChangeFrame
if opcode == self.staticdata.op_rvmprof_code:
- # do the 'leave_code' for rvmprof, but then continue
- # popping frames
- import pdb;pdb.set_trace()
+ # do the 'jit_rvmprof_code(1)' for rvmprof, but then
+ # continue popping frames. Decode jit_rvmprof_code
+ # manually here.
+ from rpython.rlib.rvmprof import cintf
+ arg1 = frame.registers_i[ord(code[position+1])].getint()
+ arg2 = frame.registers_i[ord(code[position+2])].getint()
+ assert arg1 == 1
+ cintf.jit_rvmprof_code(arg1, arg2)
self.popframe()
try:
self.compile_exit_frame_with_exception(self.last_exc_box)
diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -123,22 +123,22 @@
# done by a special case in pyjitpl.py and blackhole.py. The point
# is that the above simple pattern can be detected by the blackhole
# interp, when it first rebuilds all the intermediate RPython
-# frames; at that point it needs to call jit_enter_code() on all
+# frames; at that point it needs to call jit_rvmprof_code(0) on all
# intermediate RPython frames, so it does pattern matching to
# recognize when it must call that and with which 'unique_id' value.
#
# - The jitcode opcode 'rvmprof_code' doesn't produce any resop. When
-# meta-interpreting, it causes pyjitpl to call jit_enter_code() or
-# jit_leave_code(). As mentioned above, there is logic to call
-# jit_leave_code() even if we exit with an exception, even though
-# there is no 'catch_exception'. There is similar logic inside
-# the blackhole interpreter.
+# meta-interpreting, it causes pyjitpl to call jit_rvmprof_code().
+# As mentioned above, there is logic to call jit_rvmprof_code(1)
+# even if we exit with an exception, even though there is no
+# 'catch_exception'. There is similar logic inside the blackhole
+# interpreter.
-def jit_enter_code(unique_id):
- enter_code(unique_id) # ignore the return value
-
-def jit_leave_code(unique_id):
- s = vmprof_tl_stack.getraw()
- assert s.c_value == unique_id
- leave_code(s)
+def jit_rvmprof_code(leaving, unique_id):
+ if leaving == 0:
+ enter_code(unique_id) # ignore the return value
+ else:
+ s = vmprof_tl_stack.getraw()
+ assert s.c_value == unique_id
+ leave_code(s)
From pypy.commits at gmail.com Sun Aug 7 05:09:36 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 02:09:36 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: Blackhole interp:
in-progress
Message-ID: <57a6fad0.82ddc20a.5e0f1.1a8e@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86055:3f7d7ab3e4ff
Date: 2016-08-07 11:11 +0200
http://bitbucket.org/pypy/pypy/changeset/3f7d7ab3e4ff/
Log: Blackhole interp: in-progress
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -111,76 +111,3 @@
cintf.vmprof_tl_stack.setraw(null)
self.meta_interp(main, [30], inline=True)
assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
-
-
- def test_one(self):
-# py.test.skip("needs thread-locals in the JIT, which is only available "
-# "after translation")
- visited = []
-
- def helper():
- trace = []
- stack = cintf.vmprof_tl_stack.getraw()
- while stack:
- trace.append((stack.c_kind, stack.c_value))
- stack = stack.c_next
- visited.append(trace)
-
- llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
-
- driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
-
- class CodeObj(object):
- def __init__(self, name):
- self.name = name
-
- def get_code_fn(codes, code, arg, c):
- return code
-
- def get_name(code):
- return "foo"
-
- _get_vmprof().use_weaklist = False
- register_code_object_class(CodeObj, get_name)
-
- @vmprof_execute_code("main", get_code_fn,
- _hack_update_stack_untranslated=True)
- def f(codes, code, n, c):
- i = 0
- while i < n:
- driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
- if code.name == "main":
- c = f(codes, codes[1], 1, c)
- driver.can_enter_jit(code=code, c=c, i=i, codes=codes, n=n)
- else:
- llfn()
- c -= 1
- if c < 0:
- llfn() # bridge
- i += 1
- return c
-
- def main(n):
- codes = [CodeObj("main"), CodeObj("not main")]
- for code in codes:
- register_code(code, get_name)
- return f(codes, codes[0], n, 8)
-
- class Hooks(jit.JitHookInterface):
- def after_compile(self, debug_info):
- self.raw_start = debug_info.asminfo.rawstart
-
- hooks = Hooks()
-
- null = lltype.nullptr(cintf.VMPROFSTACK)
- cintf.vmprof_tl_stack.setraw(null)
- self.meta_interp(main, [30], policy=JitPolicy(hooks), inline=True)
- assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
- print visited
- #v = set(visited)
- #assert 0 in v
- #v.remove(0)
- #assert len(v) == 1
- #assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
- #assert cintf.vmprof_tl_stack.getraw() == null
- # ^^^ make sure we didn't leave anything dangling
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -64,6 +64,7 @@
assert self._insns[value] is None
self._insns[value] = key
self.op_catch_exception = insns.get('catch_exception/L', -1)
+ self.op_rvmprof_code = insns.get('rvmprof_code/ii', -1)
#
all_funcs = []
for key in self._insns:
@@ -270,6 +271,7 @@
self.dispatch_loop = builder.dispatch_loop
self.descrs = builder.descrs
self.op_catch_exception = builder.op_catch_exception
+ self.op_rvmprof_code = builder.op_rvmprof_code
self.count_interpreter = count_interpreter
#
if we_are_translated():
@@ -376,6 +378,20 @@
# no 'catch_exception' insn follows: just reraise
reraise(e)
+ def handle_rvmprof_enter(self):
+ code = self.jitcode.code
+ position = self.position
+ opcode = ord(code[position])
+ if opcode == self.op_rvmprof_code:
+ arg1 = self.registers_i[ord(code[position + 1])]
+ arg2 = self.registers_i[ord(code[position + 2])]
+ if arg1 == 1:
+ # we are resuming at a position that will do a
+ # jit_rvmprof_code(1), when really executed. That's a
+ # hint for the need for a jit_rvmprof_code(0).
+ from rpython.rlib.rvmprof import cintf
+ cintf.jit_rvmprof_code(0, arg2)
+
def copy_constants(self, registers, constants):
"""Copy jitcode.constants[0] to registers[255],
jitcode.constants[1] to registers[254],
@@ -1503,7 +1519,8 @@
@arguments("i", "i")
def bhimpl_rvmprof_code(leaving, unique_id):
- pass #import pdb;pdb.set_trace()
+ from rpython.rlib.rvmprof import cintf
+ cintf.jit_rvmprof_code(leaving, unique_id)
# ----------
# helpers to resume running in blackhole mode when a guard failed
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -2084,8 +2084,8 @@
# continue popping frames. Decode jit_rvmprof_code
# manually here.
from rpython.rlib.rvmprof import cintf
- arg1 = frame.registers_i[ord(code[position+1])].getint()
- arg2 = frame.registers_i[ord(code[position+2])].getint()
+ arg1 = frame.registers_i[ord(code[position + 1])].getint()
+ arg2 = frame.registers_i[ord(code[position + 2])].getint()
assert arg1 == 1
cintf.jit_rvmprof_code(arg1, arg2)
self.popframe()
diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py
--- a/rpython/jit/metainterp/resume.py
+++ b/rpython/jit/metainterp/resume.py
@@ -1343,6 +1343,7 @@
jitcode = jitcodes[jitcode_pos]
curbh.setposition(jitcode, pc)
resumereader.consume_one_section(curbh)
+ curbh.handle_rvmprof_enter()
return curbh
def force_from_resumedata(metainterp_sd, storage, deadframe, vinfo, ginfo):
From pypy.commits at gmail.com Sun Aug 7 05:30:43 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 02:30:43 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: Trying but failing to
write a test
Message-ID: <57a6ffc3.465d1c0a.1f06f.e16c@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86056:cde94224c375
Date: 2016-08-07 11:32 +0200
http://bitbucket.org/pypy/pypy/changeset/cde94224c375/
Log: Trying but failing to write a test
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -111,3 +111,44 @@
cintf.vmprof_tl_stack.setraw(null)
self.meta_interp(main, [30], inline=True)
assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
+ def test_leaving_with_exception_in_blackhole(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ class MyExc(Exception):
+ def __init__(self, c):
+ self.c = c
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while i < n:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ try:
+ f(codes, codes[1], 1, c)
+ except MyExc as e:
+ c = e.c
+ else:
+ llfn()
+ c -= 1
+ i += 1
+ jit.promote(c + 5) # failing guard
+ raise MyExc(c)
+
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ try:
+ f(codes, codes[0], n, 8)
+ except MyExc as e:
+ return e.c
+
+ null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -375,6 +375,16 @@
target = ord(code[position+1]) | (ord(code[position+2])<<8)
self.position = target
return
+ if opcode == self.op_rvmprof_code:
+ import pdb;pdb.set_trace()
+ # do the 'jit_rvmprof_code(1)' for rvmprof, but then
+ # continue popping frames. Decode jit_rvmprof_code
+ # manually here.
+ from rpython.rlib.rvmprof import cintf
+ arg1 = self.registers_i[ord(code[position + 1])]
+ arg2 = self.registers_i[ord(code[position + 2])]
+ assert arg1 == 1
+ cintf.jit_rvmprof_code(arg1, arg2)
# no 'catch_exception' insn follows: just reraise
reraise(e)
From pypy.commits at gmail.com Sun Aug 7 05:54:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 02:54:39 -0700 (PDT)
Subject: [pypy-commit] pypy default: Document how this pseudo-man-page is
turned into the standard man-page
Message-ID: <57a7055f.0205c20a.69fbd.4598@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86057:d274b6cd9120
Date: 2016-08-07 11:56 +0200
http://bitbucket.org/pypy/pypy/changeset/d274b6cd9120/
Log: Document how this pseudo-man-page is turned into the standard man-
page format
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -2,6 +2,9 @@
pypy
======
+.. note: this is turned into a regular man page "pypy.1" by
+ doing "make man" in pypy/doc/
+
SYNOPSIS
========
From pypy.commits at gmail.com Sun Aug 7 06:04:27 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 03:04:27 -0700 (PDT)
Subject: [pypy-commit] pypy default: update
Message-ID: <57a707ab.c310c20a.42ec2.2bae@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86058:dcc5844817d2
Date: 2016-08-07 12:06 +0200
http://bitbucket.org/pypy/pypy/changeset/dcc5844817d2/
Log: update
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -14,10 +14,9 @@
Defaults to 1/2 of your cache or ``4M``.
Small values (like 1 or 1KB) are useful for debugging.
-``PYPY_GC_NURSERY_CLEANUP``
- The interval at which nursery is cleaned up. Must
- be smaller than the nursery size and bigger than the
- biggest object we can allocate in the nursery.
+``PYPY_GC_NURSERY_DEBUG``
+ If set to non-zero, will fill nursery with garbage, to help
+ debugging.
``PYPY_GC_INCREMENT_STEP``
The size of memory marked during the marking step. Default is size of
@@ -62,3 +61,8 @@
use.
Values are ``0`` (off), ``1`` (on major collections) or ``2`` (also
on minor collections).
+
+``PYPY_GC_MAX_PINNED``
+ The maximal number of pinned objects at any point in time. Defaults
+ to a conservative value depending on nursery size and maximum object
+ size inside the nursery. Useful for debugging by setting it to 0.
From pypy.commits at gmail.com Sun Aug 7 06:05:21 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Sun, 07 Aug 2016 03:05:21 -0700 (PDT)
Subject: [pypy-commit] pypy resource_warning: switch to pytest import
Message-ID: <57a707e1.c5aa1c0a.db8ad.ed30@mx.google.com>
Author: Carl Friedrich Bolz
Branch: resource_warning
Changeset: r86059:e03eb49532c1
Date: 2016-08-07 11:34 +0200
http://bitbucket.org/pypy/pypy/changeset/e03eb49532c1/
Log: switch to pytest import
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -1,5 +1,5 @@
from __future__ import with_statement
-import py, os, errno
+import pytest, os, errno
from pypy.interpreter.gateway import interp2app, unwrap_spec
def getfile(space):
@@ -26,7 +26,7 @@
def setup_class(cls):
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("foo.txt")))
+ str(pytest.ensuretemp("fileimpl").join("foo.txt")))
cls.w_file = getfile(cls.space)
cls.w_regex_search = cls.space.wrap(interp2app(regex_search))
@@ -388,7 +388,7 @@
cls.old_read = os.read
if cls.runappdirect:
- py.test.skip("works with internals of _file impl on py.py")
+ pytest.skip("works with internals of _file impl on py.py")
def read(fd, n=None):
if fd != 424242:
return cls.old_read(fd, n)
@@ -427,9 +427,9 @@
def setup_class(cls):
if not cls.runappdirect:
- py.test.skip("likely to deadlock when interpreted by py.py")
+ pytest.skip("likely to deadlock when interpreted by py.py")
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("concurrency.txt")))
+ str(pytest.ensuretemp("fileimpl").join("concurrency.txt")))
cls.w_file = getfile(cls.space)
def test_concurrent_writes(self):
@@ -540,7 +540,7 @@
def setup_class(cls):
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("foo.txt")))
+ str(pytest.ensuretemp("fileimpl").join("foo.txt")))
cls.w_file = getfile(cls.space)
def test___enter__(self):
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -1,5 +1,5 @@
import sys, os
-import py
+import pytest
from pypy.tool.pytest.objspace import gettestobjspace
from pypy.interpreter.gateway import interp2app
from pypy.module._file.test.test_file import regex_search
@@ -14,8 +14,6 @@
mod.w_socket = space.appexec([], "(): import _socket as m; return m")
mod.path = udir.join('fd')
mod.path.write('fo')
- mod.raises = py.test.raises # make raises available from app-level tests
- mod.skip = py.test.skip
def test_gethostname():
host = space.appexec([w_socket], "(_socket): return _socket.gethostname()")
@@ -43,7 +41,7 @@
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and not ipv6:
from pypy.interpreter.error import OperationError
- with py.test.raises(OperationError):
+ with pytest.raises(OperationError):
space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyaddr(host)")
continue
@@ -59,14 +57,14 @@
assert space.unwrap(port) == 25
# 1 arg version
if sys.version_info < (2, 4):
- py.test.skip("getservbyname second argument is not optional before python 2.4")
+ pytest.skip("getservbyname second argument is not optional before python 2.4")
port = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getservbyname(name)")
assert space.unwrap(port) == 25
def test_getservbyport():
if sys.version_info < (2, 4):
- py.test.skip("getservbyport does not exist before python 2.4")
+ pytest.skip("getservbyport does not exist before python 2.4")
port = 25
# 2 args version
name = space.appexec([w_socket, space.wrap(port)],
@@ -99,7 +97,7 @@
def test_fromfd():
# XXX review
if not hasattr(socket, 'fromfd'):
- py.test.skip("No socket.fromfd on this platform")
+ pytest.skip("No socket.fromfd on this platform")
orig_fd = path.open()
fd = space.appexec([w_socket, space.wrap(orig_fd.fileno()),
space.wrap(socket.AF_INET), space.wrap(socket.SOCK_STREAM),
@@ -159,7 +157,7 @@
def test_pton_ntop_ipv4():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
tests = [
("123.45.67.89", "\x7b\x2d\x43\x59"),
("0.0.0.0", "\x00" * 4),
@@ -175,9 +173,9 @@
def test_ntop_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -196,9 +194,9 @@
def test_pton_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -217,7 +215,7 @@
assert space.unwrap(w_packed) == packed
def test_has_ipv6():
- py.test.skip("has_ipv6 is always True on PyPy for now")
+ pytest.skip("has_ipv6 is always True on PyPy for now")
res = space.appexec([w_socket], "(_socket): return _socket.has_ipv6")
assert space.unwrap(res) == socket.has_ipv6
@@ -231,7 +229,7 @@
w_l = space.appexec([w_socket, space.wrap(host), space.wrap(port)],
"(_socket, host, port): return _socket.getaddrinfo(host, long(port))")
assert space.unwrap(w_l) == info
- py.test.skip("Unicode conversion is too slow")
+ pytest.skip("Unicode conversion is too slow")
w_l = space.appexec([w_socket, space.wrap(unicode(host)), space.wrap(port)],
"(_socket, host, port): return _socket.getaddrinfo(host, port)")
assert space.unwrap(w_l) == info
@@ -252,7 +250,7 @@
def test_addr_raw_packet():
from pypy.module._socket.interp_socket import addr_as_object
if not hasattr(rsocket._c, 'sockaddr_ll'):
- py.test.skip("posix specific test")
+ pytest.skip("posix specific test")
# HACK: To get the correct interface number of lo, which in most cases is 1,
# but can be anything (i.e. 39), we need to call the libc function
# if_nametoindex to get the correct index
@@ -690,11 +688,11 @@
class AppTestNetlink:
def setup_class(cls):
if not hasattr(os, 'getpid'):
- py.test.skip("AF_NETLINK needs os.getpid()")
+ pytest.skip("AF_NETLINK needs os.getpid()")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_NETLINK')")
if not space.is_true(w_ok):
- py.test.skip("no AF_NETLINK on this platform")
+ pytest.skip("no AF_NETLINK on this platform")
cls.space = space
def test_connect_to_kernel_netlink_routing_socket(self):
@@ -710,11 +708,11 @@
class AppTestPacket:
def setup_class(cls):
if not hasattr(os, 'getuid') or os.getuid() != 0:
- py.test.skip("AF_PACKET needs to be root for testing")
+ pytest.skip("AF_PACKET needs to be root for testing")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_PACKET')")
if not space.is_true(w_ok):
- py.test.skip("no AF_PACKET on this platform")
+ pytest.skip("no AF_PACKET on this platform")
cls.space = space
def test_convert_between_tuple_and_sockaddr_ll(self):
From pypy.commits at gmail.com Sun Aug 7 06:05:23 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Sun, 07 Aug 2016 03:05:23 -0700 (PDT)
Subject: [pypy-commit] pypy resource_warning: skip resource warning tests
under appdirect
Message-ID: <57a707e3.68adc20a.25dd3.37ac@mx.google.com>
Author: Carl Friedrich Bolz
Branch: resource_warning
Changeset: r86060:d74e414261fc
Date: 2016-08-07 11:35 +0200
http://bitbucket.org/pypy/pypy/changeset/d74e414261fc/
Log: skip resource warning tests under appdirect
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -218,6 +218,9 @@
assert exc.value.filename == os.curdir
def test_encoding_errors(self):
+ import sys
+ if '__pypy__' not in sys.builtin_module_names:
+ pytest.skip("pypy only test")
import _file
with self.file(self.temppath, "w") as f:
@@ -266,6 +269,7 @@
if '__pypy__' in sys.builtin_module_names:
assert repr(self.temppath) in g.getvalue()
+ @pytest.mark.skipif("config.option.runappdirect")
def test_track_resources(self):
import os, gc, sys, cStringIO
if '__pypy__' not in sys.builtin_module_names:
@@ -308,6 +312,7 @@
assert self.regex_search("WARNING: unclosed file: ", msg)
assert "Created at" not in msg
+ @pytest.mark.skipif("config.option.runappdirect")
def test_track_resources_dont_crash(self):
import os, gc, sys, cStringIO
if '__pypy__' not in sys.builtin_module_names:
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -414,6 +414,7 @@
if os.name != 'nt':
raises(OSError, os.close, fileno)
+ @pytest.mark.skipif("config.option.runappdirect")
def test_track_resources(self):
import os, gc, sys, cStringIO
import _socket
From pypy.commits at gmail.com Sun Aug 7 06:05:27 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Sun, 07 Aug 2016 03:05:27 -0700 (PDT)
Subject: [pypy-commit] pypy resource_warning: add a paragraph about -X
track-resources to cpython_differences. Also add the
Message-ID: <57a707e7.0dc11c0a.fa8c3.f01c@mx.google.com>
Author: Carl Friedrich Bolz
Branch: resource_warning
Changeset: r86062:9a4bd1d4dfc7
Date: 2016-08-07 12:04 +0200
http://bitbucket.org/pypy/pypy/changeset/9a4bd1d4dfc7/
Log: add a paragraph about -X track-resources to cpython_differences.
Also add the option to the man-page
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -99,17 +99,24 @@
The garbage collectors used or implemented by PyPy are not based on
reference counting, so the objects are not freed instantly when they are no
-longer reachable. The most obvious effect of this is that files are not
+longer reachable. The most obvious effect of this is that files (and sockets, etc) are not
promptly closed when they go out of scope. For files that are opened for
writing, data can be left sitting in their output buffers for a while, making
the on-disk file appear empty or truncated. Moreover, you might reach your
OS's limit on the number of concurrently opened files.
-Fixing this is essentially impossible without forcing a
+If you are debugging a case where a file in your program is not closed
+properly, you can use the ``-X track-resources`` command line option. If it is
+given, a ``ResourceWarning`` is produced for every file and socket that the
+garbage collector closes. The warning will contain the stack trace of the
+position where the file or socket was created, to make it easier to see which
+parts of the program don't close files explicitly.
+
+Fixing this difference to CPython is essentially impossible without forcing a
reference-counting approach to garbage collection. The effect that you
get in CPython has clearly been described as a side-effect of the
implementation and not a language design decision: programs relying on
-this are basically bogus. It would anyway be insane to try to enforce
+this are basically bogus. It would be too strong a restriction to try to enforce
CPython's behavior in a language spec, given that it has no chance to be
adopted by Jython or IronPython (or any other port of Python to Java or
.NET).
@@ -134,7 +141,7 @@
Here are some more technical details. This issue affects the precise
time at which ``__del__`` methods are called, which
-is not reliable in PyPy (nor Jython nor IronPython). It also means that
+is not reliable or timely in PyPy (nor Jython nor IronPython). It also means that
**weak references** may stay alive for a bit longer than expected. This
makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
useful: they will appear to stay alive for a bit longer in PyPy, and
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -48,6 +48,10 @@
-B
Disable writing bytecode (``.pyc``) files.
+-X track-resources
+ Produce a ``ResourceWarning`` whenever a file or socket is closed by the
+ garbage collector.
+
--version
Print the PyPy version.
From pypy.commits at gmail.com Sun Aug 7 06:05:25 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Sun, 07 Aug 2016 03:05:25 -0700 (PDT)
Subject: [pypy-commit] pypy resource_warning: add whatsnew entry
Message-ID: <57a707e5.c2f3c20a.a80d5.3c3e@mx.google.com>
Author: Carl Friedrich Bolz
Branch: resource_warning
Changeset: r86061:f12f10f3fd41
Date: 2016-08-07 11:39 +0200
http://bitbucket.org/pypy/pypy/changeset/f12f10f3fd41/
Log: add whatsnew entry
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -119,3 +119,8 @@
``ffi.from_buffer(string)`` in CFFI. Additionally, and most
importantly, CFFI calls that take directly a string as argument don't
copy the string any more---this is like CFFI on CPython.
+
+.. branch: resource_warning
+
+Add a new command line option -X track-resources which will produce
+ResourceWarnings when the GC closes unclosed files and sockets.
From pypy.commits at gmail.com Sun Aug 7 06:24:11 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 03:24:11 -0700 (PDT)
Subject: [pypy-commit] cffi default: Eventually,
replace this UserWarning with an error
Message-ID: <57a70c4b.4317c20a.5ff9b.42bf@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2736:18cdf37d6b26
Date: 2016-08-07 12:25 +0200
http://bitbucket.org/cffi/cffi/changeset/18cdf37d6b26/
Log: Eventually, replace this UserWarning with an error
diff --git a/cffi/model.py b/cffi/model.py
--- a/cffi/model.py
+++ b/cffi/model.py
@@ -519,12 +519,10 @@
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
- import warnings
- warnings.warn("%r has no values explicitly defined; next version "
- "will refuse to guess which integer type it is "
- "meant to be (unsigned/signed, int/long)"
- % self._get_c_name())
- smallest_value = largest_value = 0
+ raise api.CDefError("%r has no values explicitly defined: "
+ "refusing to guess which integer type it is "
+ "meant to be (unsigned/signed, int/long)"
+ % self._get_c_name())
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
From pypy.commits at gmail.com Sun Aug 7 06:24:51 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 03:24:51 -0700 (PDT)
Subject: [pypy-commit] pypy default: update to cffi/18cdf37d6b26
Message-ID: <57a70c73.0dc11c0a.fa8c3.f78e@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86063:962a5a4ad08e
Date: 2016-08-07 12:26 +0200
http://bitbucket.org/pypy/pypy/changeset/962a5a4ad08e/
Log: update to cffi/18cdf37d6b26
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -519,12 +519,10 @@
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
- import warnings
- warnings.warn("%r has no values explicitly defined; next version "
- "will refuse to guess which integer type it is "
- "meant to be (unsigned/signed, int/long)"
- % self._get_c_name())
- smallest_value = largest_value = 0
+ raise api.CDefError("%r has no values explicitly defined: "
+ "refusing to guess which integer type it is "
+ "meant to be (unsigned/signed, int/long)"
+ % self._get_c_name())
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
From pypy.commits at gmail.com Sun Aug 7 08:35:55 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Sun, 07 Aug 2016 05:35:55 -0700 (PDT)
Subject: [pypy-commit] pypy resource_warning: close to-be-merged branch
Message-ID: <57a72b2b.a710c20a.3fa53.700a@mx.google.com>
Author: Carl Friedrich Bolz
Branch: resource_warning
Changeset: r86064:64bba3428b02
Date: 2016-08-07 14:33 +0200
http://bitbucket.org/pypy/pypy/changeset/64bba3428b02/
Log: close to-be-merged branch
From pypy.commits at gmail.com Sun Aug 7 08:35:57 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Sun, 07 Aug 2016 05:35:57 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge resource_warning:
Message-ID: <57a72b2d.915c1c0a.c121.2511@mx.google.com>
Author: Carl Friedrich Bolz
Branch:
Changeset: r86065:16e118636641
Date: 2016-08-07 14:35 +0200
http://bitbucket.org/pypy/pypy/changeset/16e118636641/
Log: merge resource_warning:
adds a new commandline option -X track-resources that will produce a
ResourceWarning when the GC closes a file or socket. The traceback
for the place where the file or socket was allocated is given as
well
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -99,17 +99,24 @@
The garbage collectors used or implemented by PyPy are not based on
reference counting, so the objects are not freed instantly when they are no
-longer reachable. The most obvious effect of this is that files are not
+longer reachable. The most obvious effect of this is that files (and sockets, etc) are not
promptly closed when they go out of scope. For files that are opened for
writing, data can be left sitting in their output buffers for a while, making
the on-disk file appear empty or truncated. Moreover, you might reach your
OS's limit on the number of concurrently opened files.
-Fixing this is essentially impossible without forcing a
+If you are debugging a case where a file in your program is not closed
+properly, you can use the ``-X track-resources`` command line option. If it is
+given, a ``ResourceWarning`` is produced for every file and socket that the
+garbage collector closes. The warning will contain the stack trace of the
+position where the file or socket was created, to make it easier to see which
+parts of the program don't close files explicitly.
+
+Fixing this difference to CPython is essentially impossible without forcing a
reference-counting approach to garbage collection. The effect that you
get in CPython has clearly been described as a side-effect of the
implementation and not a language design decision: programs relying on
-this are basically bogus. It would anyway be insane to try to enforce
+this are basically bogus. It would be too strong a restriction to try to enforce
CPython's behavior in a language spec, given that it has no chance to be
adopted by Jython or IronPython (or any other port of Python to Java or
.NET).
@@ -134,7 +141,7 @@
Here are some more technical details. This issue affects the precise
time at which ``__del__`` methods are called, which
-is not reliable in PyPy (nor Jython nor IronPython). It also means that
+is not reliable or timely in PyPy (nor Jython nor IronPython). It also means that
**weak references** may stay alive for a bit longer than expected. This
makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
useful: they will appear to stay alive for a bit longer in PyPy, and
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -51,6 +51,10 @@
-B
Disable writing bytecode (``.pyc``) files.
+-X track-resources
+ Produce a ``ResourceWarning`` whenever a file or socket is closed by the
+ garbage collector.
+
--version
Print the PyPy version.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -119,3 +119,8 @@
``ffi.from_buffer(string)`` in CFFI. Additionally, and most
importantly, CFFI calls that take directly a string as argument don't
copy the string any more---this is like CFFI on CPython.
+
+.. branch: resource_warning
+
+Add a new command line option -X track-resources which will produce
+ResourceWarnings when the GC closes unclosed files and sockets.
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -24,11 +24,15 @@
-V : print the Python version number and exit (also --version)
-W arg : warning control; arg is action:message:category:module:lineno
also PYTHONWARNINGS=arg
+-X arg : set implementation-specific option
file : program read from script file
- : program read from stdin (default; interactive mode if a tty)
arg ...: arguments passed to program in sys.argv[1:]
+
PyPy options and arguments:
--info : print translation information about this PyPy executable
+-X track-resources : track the creation of files and sockets and display
+ a warning if they are not closed explicitly
"""
# Missing vs CPython: PYTHONHOME, PYTHONCASEOK
USAGE2 = """
@@ -229,6 +233,14 @@
import pypyjit
pypyjit.set_param(jitparam)
+def set_runtime_options(options, Xparam, *args):
+ if Xparam == 'track-resources':
+ sys.pypy_set_track_resources(True)
+ else:
+ print >> sys.stderr, 'usage: %s -X [options]' % (get_sys_executable(),)
+ print >> sys.stderr, '[options] can be: track-resources'
+ raise SystemExit
+
class CommandLineError(Exception):
pass
@@ -404,6 +416,7 @@
'--info': (print_info, None),
'--jit': (set_jit_option, Ellipsis),
'-funroll-loops': (funroll_loops, None),
+ '-X': (set_runtime_options, Ellipsis),
'--': (end_options, None),
}
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1764,6 +1764,40 @@
_warnings.warn(msg, warningcls, stacklevel=stacklevel)
""")
+ def resource_warning(self, w_msg, w_tb):
+ self.appexec([w_msg, w_tb],
+ """(msg, tb):
+ import sys
+ print >> sys.stderr, msg
+ if tb:
+ print >> sys.stderr, "Created at (most recent call last):"
+ print >> sys.stderr, tb
+ """)
+
+ def format_traceback(self):
+ # we need to disable track_resources before calling the traceback
+ # module. Else, it tries to open more files to format the traceback,
+ # the file constructor will call space.format_traceback etc., in an
+ # infinite recursion
+ flag = self.sys.track_resources
+ self.sys.track_resources = False
+ try:
+ return self.appexec([],
+ """():
+ import sys, traceback
+ # the "1" is because we don't want to show THIS code
+ # object in the traceback
+ try:
+ f = sys._getframe(1)
+ except ValueError:
+ # this happens if you call format_traceback at the very beginning
+ # of startup, when there is no bottom code object
+ return ''
+ return "".join(traceback.format_stack(f))
+ """)
+ finally:
+ self.sys.track_resources = flag
+
class AppExecCache(SpaceCache):
def build(cache, source):
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -220,6 +220,13 @@
expected = {"no_user_site": True}
self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass', **expected)
+ def test_track_resources(self, monkeypatch):
+ myflag = [False]
+ def pypy_set_track_resources(flag):
+ myflag[0] = flag
+ monkeypatch.setattr(sys, 'pypy_set_track_resources', pypy_set_track_resources, raising=False)
+ self.check(['-X', 'track-resources'], {}, sys_argv=[''], run_stdin=True)
+ assert myflag[0] == True
class TestInteraction:
"""
@@ -1074,4 +1081,3 @@
# assert it did not crash
finally:
sys.path[:] = old_sys_path
-
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -427,3 +427,28 @@
space.finish()
# assert that we reach this point without getting interrupted
# by the OperationError(NameError)
+
+ def test_format_traceback(self):
+ from pypy.tool.pytest.objspace import maketestobjspace
+ from pypy.interpreter.gateway import interp2app
+ #
+ def format_traceback(space):
+ return space.format_traceback()
+ #
+ space = maketestobjspace()
+ w_format_traceback = space.wrap(interp2app(format_traceback))
+ w_tb = space.appexec([w_format_traceback], """(format_traceback):
+ def foo():
+ return bar()
+ def bar():
+ return format_traceback()
+ return foo()
+ """)
+ tb = space.str_w(w_tb)
+ expected = '\n'.join([
+ ' File "?", line 6, in anonymous', # this is the appexec code object
+ ' File "?", line 3, in foo',
+ ' File "?", line 5, in bar',
+ ''
+ ])
+ assert tb == expected
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -38,23 +38,33 @@
errors = None
fd = -1
cffi_fileobj = None # pypy/module/_cffi_backend
+ w_tb = None # String representation of the traceback at creation time
newlines = 0 # Updated when the stream is closed
def __init__(self, space):
self.space = space
self.register_finalizer(space)
+ if self.space.sys.track_resources:
+ self.w_tb = self.space.format_traceback()
def _finalize_(self):
# assume that the file and stream objects are only visible in the
# thread that runs _finalize_, so no race condition should be
# possible and no locking is done here.
- if self.stream is not None:
- try:
- self.direct_close()
- except StreamErrors as e:
- operr = wrap_streamerror(self.space, e, self.w_name)
- raise operr
+ if self.stream is None:
+ return
+ if self.space.sys.track_resources:
+ w_repr = self.space.repr(self)
+ str_repr = self.space.str_w(w_repr)
+ w_msg = self.space.wrap("WARNING: unclosed file: " + str_repr)
+ self.space.resource_warning(w_msg, self.w_tb)
+ #
+ try:
+ self.direct_close()
+ except StreamErrors as e:
+ operr = wrap_streamerror(self.space, e, self.w_name)
+ raise operr
def fdopenstream(self, stream, fd, mode, w_name=None):
self.fd = fd
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -1,5 +1,6 @@
from __future__ import with_statement
-import py, os, errno
+import pytest, os, errno
+from pypy.interpreter.gateway import interp2app, unwrap_spec
def getfile(space):
return space.appexec([], """():
@@ -10,13 +11,24 @@
return file
""")
+# the following function is used e.g. in test_resource_warning
+@unwrap_spec(regex=str, s=str)
+def regex_search(space, regex, s):
+ import re
+ import textwrap
+ regex = textwrap.dedent(regex).strip()
+ m = re.search(regex, s)
+ m = bool(m)
+ return space.wrap(m)
+
class AppTestFile(object):
spaceconfig = dict(usemodules=("_file",))
def setup_class(cls):
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("foo.txt")))
+ str(pytest.ensuretemp("fileimpl").join("foo.txt")))
cls.w_file = getfile(cls.space)
+ cls.w_regex_search = cls.space.wrap(interp2app(regex_search))
def test_simple(self):
f = self.file(self.temppath, "w")
@@ -206,6 +218,9 @@
assert exc.value.filename == os.curdir
def test_encoding_errors(self):
+ import sys
+ if '__pypy__' not in sys.builtin_module_names:
+ pytest.skip("pypy only test")
import _file
with self.file(self.temppath, "w") as f:
@@ -254,6 +269,71 @@
if '__pypy__' in sys.builtin_module_names:
assert repr(self.temppath) in g.getvalue()
+ @pytest.mark.skipif("config.option.runappdirect")
+ def test_track_resources(self):
+ import os, gc, sys, cStringIO
+ if '__pypy__' not in sys.builtin_module_names:
+ skip("pypy specific test")
+ def fn(flag1, flag2, do_close=False):
+ sys.pypy_set_track_resources(flag1)
+ f = self.file(self.temppath, 'w')
+ sys.pypy_set_track_resources(flag2)
+ buf = cStringIO.StringIO()
+ preverr = sys.stderr
+ try:
+ sys.stderr = buf
+ if do_close:
+ f.close()
+ del f
+ gc.collect() # force __del__ to be called
+ finally:
+ sys.stderr = preverr
+ sys.pypy_set_track_resources(False)
+ return buf.getvalue()
+
+ # check with track_resources disabled
+ assert fn(False, False) == ""
+ #
+ # check that we don't get the warning if we actually close the file
+ assert fn(False, False, do_close=True) == ""
+ #
+ # check with track_resources enabled
+ msg = fn(True, True)
+ assert self.regex_search(r"""
+ WARNING: unclosed file:
+ Created at \(most recent call last\):
+ File ".*", line .*, in test_track_resources
+ File ".*", line .*, in fn
+ """, msg)
+ #
+ # check with track_resources enabled in the destructor BUT with a
+ # file which was created when track_resources was disabled
+ msg = fn(False, True)
+ assert self.regex_search("WARNING: unclosed file: ", msg)
+ assert "Created at" not in msg
+
+ @pytest.mark.skipif("config.option.runappdirect")
+ def test_track_resources_dont_crash(self):
+ import os, gc, sys, cStringIO
+ if '__pypy__' not in sys.builtin_module_names:
+ skip("pypy specific test")
+ #
+ # try hard to create a code object whose co_filename points to an
+ # EXISTING file, so that traceback.py tries to open it when formatting
+ # the stacktrace
+ f = open(self.temppath, 'w')
+ f.close()
+ co = compile('open("%s")' % self.temppath, self.temppath, 'exec')
+ sys.pypy_set_track_resources(True)
+ try:
+ # this exec used to fail, because space.format_traceback tried to
+ # recursively open a file, causing an infinite recursion. For the
+ # purpose of this test, it is enough that it actually finishes
+ # without errors
+ exec co
+ finally:
+ sys.pypy_set_track_resources(False)
+
def test_truncate(self):
f = self.file(self.temppath, "w")
f.write("foo")
@@ -313,7 +393,7 @@
cls.old_read = os.read
if cls.runappdirect:
- py.test.skip("works with internals of _file impl on py.py")
+ pytest.skip("works with internals of _file impl on py.py")
def read(fd, n=None):
if fd != 424242:
return cls.old_read(fd, n)
@@ -352,9 +432,9 @@
def setup_class(cls):
if not cls.runappdirect:
- py.test.skip("likely to deadlock when interpreted by py.py")
+ pytest.skip("likely to deadlock when interpreted by py.py")
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("concurrency.txt")))
+ str(pytest.ensuretemp("fileimpl").join("concurrency.txt")))
cls.w_file = getfile(cls.space)
def test_concurrent_writes(self):
@@ -465,7 +545,7 @@
def setup_class(cls):
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("foo.txt")))
+ str(pytest.ensuretemp("fileimpl").join("foo.txt")))
cls.w_file = getfile(cls.space)
def test___enter__(self):
diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py
--- a/pypy/module/_socket/interp_socket.py
+++ b/pypy/module/_socket/interp_socket.py
@@ -151,9 +151,23 @@
class W_Socket(W_Root):
+ w_tb = None # String representation of the traceback at creation time
+
def __init__(self, space, sock):
+ self.space = space
self.sock = sock
register_socket(space, sock)
+ if self.space.sys.track_resources:
+ self.w_tb = self.space.format_traceback()
+ self.register_finalizer(space)
+
+ def _finalize_(self):
+ is_open = self.sock.fd >= 0
+ if is_open and self.space.sys.track_resources:
+ w_repr = self.space.repr(self)
+ str_repr = self.space.str_w(w_repr)
+ w_msg = self.space.wrap("WARNING: unclosed " + str_repr)
+ self.space.resource_warning(w_msg, self.w_tb)
def get_type_w(self, space):
return space.wrap(self.sock.type)
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -1,6 +1,8 @@
import sys, os
-import py
+import pytest
from pypy.tool.pytest.objspace import gettestobjspace
+from pypy.interpreter.gateway import interp2app
+from pypy.module._file.test.test_file import regex_search
from rpython.tool.udir import udir
from rpython.rlib import rsocket
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -12,8 +14,6 @@
mod.w_socket = space.appexec([], "(): import _socket as m; return m")
mod.path = udir.join('fd')
mod.path.write('fo')
- mod.raises = py.test.raises # make raises available from app-level tests
- mod.skip = py.test.skip
def test_gethostname():
host = space.appexec([w_socket], "(_socket): return _socket.gethostname()")
@@ -41,7 +41,7 @@
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and not ipv6:
from pypy.interpreter.error import OperationError
- with py.test.raises(OperationError):
+ with pytest.raises(OperationError):
space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyaddr(host)")
continue
@@ -57,14 +57,14 @@
assert space.unwrap(port) == 25
# 1 arg version
if sys.version_info < (2, 4):
- py.test.skip("getservbyname second argument is not optional before python 2.4")
+ pytest.skip("getservbyname second argument is not optional before python 2.4")
port = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getservbyname(name)")
assert space.unwrap(port) == 25
def test_getservbyport():
if sys.version_info < (2, 4):
- py.test.skip("getservbyport does not exist before python 2.4")
+ pytest.skip("getservbyport does not exist before python 2.4")
port = 25
# 2 args version
name = space.appexec([w_socket, space.wrap(port)],
@@ -97,7 +97,7 @@
def test_fromfd():
# XXX review
if not hasattr(socket, 'fromfd'):
- py.test.skip("No socket.fromfd on this platform")
+ pytest.skip("No socket.fromfd on this platform")
orig_fd = path.open()
fd = space.appexec([w_socket, space.wrap(orig_fd.fileno()),
space.wrap(socket.AF_INET), space.wrap(socket.SOCK_STREAM),
@@ -157,7 +157,7 @@
def test_pton_ntop_ipv4():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
tests = [
("123.45.67.89", "\x7b\x2d\x43\x59"),
("0.0.0.0", "\x00" * 4),
@@ -173,9 +173,9 @@
def test_ntop_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -194,9 +194,9 @@
def test_pton_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -215,7 +215,7 @@
assert space.unwrap(w_packed) == packed
def test_has_ipv6():
- py.test.skip("has_ipv6 is always True on PyPy for now")
+ pytest.skip("has_ipv6 is always True on PyPy for now")
res = space.appexec([w_socket], "(_socket): return _socket.has_ipv6")
assert space.unwrap(res) == socket.has_ipv6
@@ -229,7 +229,7 @@
w_l = space.appexec([w_socket, space.wrap(host), space.wrap(port)],
"(_socket, host, port): return _socket.getaddrinfo(host, long(port))")
assert space.unwrap(w_l) == info
- py.test.skip("Unicode conversion is too slow")
+ pytest.skip("Unicode conversion is too slow")
w_l = space.appexec([w_socket, space.wrap(unicode(host)), space.wrap(port)],
"(_socket, host, port): return _socket.getaddrinfo(host, port)")
assert space.unwrap(w_l) == info
@@ -250,7 +250,7 @@
def test_addr_raw_packet():
from pypy.module._socket.interp_socket import addr_as_object
if not hasattr(rsocket._c, 'sockaddr_ll'):
- py.test.skip("posix specific test")
+ pytest.skip("posix specific test")
# HACK: To get the correct interface number of lo, which in most cases is 1,
# but can be anything (i.e. 39), we need to call the libc function
# if_nametoindex to get the correct index
@@ -314,6 +314,7 @@
def setup_class(cls):
cls.space = space
cls.w_udir = space.wrap(str(udir))
+ cls.w_regex_search = space.wrap(interp2app(regex_search))
def teardown_class(cls):
if not cls.runappdirect:
@@ -402,6 +403,64 @@
if os.name != 'nt':
raises(OSError, os.close, fileno)
+ def test_socket_track_resources(self):
+ import _socket, os, gc, sys, cStringIO
+ s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
+ fileno = s.fileno()
+ assert s.fileno() >= 0
+ s.close()
+ assert s.fileno() < 0
+ s.close()
+ if os.name != 'nt':
+ raises(OSError, os.close, fileno)
+
+ @pytest.mark.skipif("config.option.runappdirect")
+ def test_track_resources(self):
+ import os, gc, sys, cStringIO
+ import _socket
+ if '__pypy__' not in sys.builtin_module_names:
+ skip("pypy specific test")
+ #
+ def fn(flag1, flag2, do_close=False):
+ sys.pypy_set_track_resources(flag1)
+ mysock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
+ sys.pypy_set_track_resources(flag2)
+ buf = cStringIO.StringIO()
+ preverr = sys.stderr
+ try:
+ sys.stderr = buf
+ if do_close:
+ mysock.close()
+ del mysock
+ gc.collect() # force __del__ to be called
+ finally:
+ sys.stderr = preverr
+ sys.pypy_set_track_resources(False)
+ return buf.getvalue()
+
+ # check with track_resources disabled
+ assert fn(False, False) == ""
+ #
+ # check that we don't get the warning if we actually closed the socket
+ msg = fn(True, True, do_close=True)
+ assert msg == ''
+ #
+ # check with track_resources enabled
+ msg = fn(True, True)
+ assert self.regex_search(r"""
+ WARNING: unclosed
+ Created at \(most recent call last\):
+ File ".*", line .*, in test_track_resources
+ File ".*", line .*, in fn
+ """, msg)
+ #
+ # track_resources is enabled after the construction of the socket. In
+ # this case, the socket is not registered for finalization at all, so
+ # we don't see a message
+ msg = fn(False, True)
+ assert msg == ''
+
+
def test_socket_close_error(self):
import _socket, os
if os.name == 'nt':
@@ -630,11 +689,11 @@
class AppTestNetlink:
def setup_class(cls):
if not hasattr(os, 'getpid'):
- py.test.skip("AF_NETLINK needs os.getpid()")
+ pytest.skip("AF_NETLINK needs os.getpid()")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_NETLINK')")
if not space.is_true(w_ok):
- py.test.skip("no AF_NETLINK on this platform")
+ pytest.skip("no AF_NETLINK on this platform")
cls.space = space
def test_connect_to_kernel_netlink_routing_socket(self):
@@ -650,11 +709,11 @@
class AppTestPacket:
def setup_class(cls):
if not hasattr(os, 'getuid') or os.getuid() != 0:
- py.test.skip("AF_PACKET needs to be root for testing")
+ pytest.skip("AF_PACKET needs to be root for testing")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_PACKET')")
if not space.is_true(w_ok):
- py.test.skip("no AF_PACKET on this platform")
+ pytest.skip("no AF_PACKET on this platform")
cls.space = space
def test_convert_between_tuple_and_sockaddr_ll(self):
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -20,6 +20,7 @@
self.defaultencoding = "ascii"
self.filesystemencoding = None
self.debug = True
+ self.track_resources = False
self.dlopenflags = rdynload._dlopen_default_mode()
interpleveldefs = {
@@ -55,6 +56,8 @@
'_current_frames' : 'currentframes._current_frames',
'setrecursionlimit' : 'vm.setrecursionlimit',
'getrecursionlimit' : 'vm.getrecursionlimit',
+ 'pypy_set_track_resources' : 'vm.set_track_resources',
+ 'pypy_get_track_resources' : 'vm.get_track_resources',
'setcheckinterval' : 'vm.setcheckinterval',
'getcheckinterval' : 'vm.getcheckinterval',
'exc_info' : 'vm.exc_info',
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -61,6 +61,13 @@
"""
return space.wrap(space.sys.recursionlimit)
+ at unwrap_spec(flag=bool)
+def set_track_resources(space, flag):
+ space.sys.track_resources = flag
+
+def get_track_resources(space):
+ return space.wrap(space.sys.track_resources)
+
@unwrap_spec(interval=int)
def setcheckinterval(space, interval):
"""Tell the Python interpreter to check for asynchronous events every
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -428,4 +428,5 @@
FakeObjSpace.sys.filesystemencoding = 'foobar'
FakeObjSpace.sys.defaultencoding = 'ascii'
FakeObjSpace.sys.dlopenflags = 123
+FakeObjSpace.sys.track_resources = False
FakeObjSpace.builtin = FakeModule()
From pypy.commits at gmail.com Sun Aug 7 13:46:28 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 10:46:28 -0700 (PDT)
Subject: [pypy-commit] pypy default: typo
Message-ID: <57a773f4.88711c0a.509e1.8e14@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86066:20f28e81988a
Date: 2016-08-07 19:45 +0200
http://bitbucket.org/pypy/pypy/changeset/20f28e81988a/
Log: typo
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -116,7 +116,7 @@
reference-counting approach to garbage collection. The effect that you
get in CPython has clearly been described as a side-effect of the
implementation and not a language design decision: programs relying on
-this are basically bogus. It would a too strong restriction to try to enforce
+this are basically bogus. It would be a too strong restriction to try to enforce
CPython's behavior in a language spec, given that it has no chance to be
adopted by Jython or IronPython (or any other port of Python to Java or
.NET).
From pypy.commits at gmail.com Sun Aug 7 13:52:34 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 10:52:34 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: Tweak the test until it
tests what it is supposed to.
Message-ID: <57a77562.09afc20a.998b.d209@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86067:39f3ff6ed5d7
Date: 2016-08-07 19:52 +0200
http://bitbucket.org/pypy/pypy/changeset/39f3ff6ed5d7/
Log: Tweak the test until it tests what it is supposed to.
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -125,17 +125,20 @@
_hack_update_stack_untranslated=True)
def f(codes, code, n, c):
i = 0
- while i < n:
+ while True:
driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if i >= n:
+ break
+ i += 1
if code.name == "main":
try:
f(codes, codes[1], 1, c)
except MyExc as e:
c = e.c
+ driver.can_enter_jit(code=code, c=c, i=i, codes=codes, n=n)
else:
llfn()
c -= 1
- i += 1
jit.promote(c + 5) # failing guard
raise MyExc(c)
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -376,7 +376,6 @@
self.position = target
return
if opcode == self.op_rvmprof_code:
- import pdb;pdb.set_trace()
# do the 'jit_rvmprof_code(1)' for rvmprof, but then
# continue popping frames. Decode jit_rvmprof_code
# manually here.
From pypy.commits at gmail.com Sun Aug 7 14:00:47 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 11:00:47 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: clean-ups
Message-ID: <57a7774f.81cb1c0a.a9579.e1bb@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86068:3b3cad7cd230
Date: 2016-08-07 20:00 +0200
http://bitbucket.org/pypy/pypy/changeset/3b3cad7cd230/
Log: clean-ups
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -376,8 +376,8 @@
self.position = target
return
if opcode == self.op_rvmprof_code:
- # do the 'jit_rvmprof_code(1)' for rvmprof, but then
- # continue popping frames. Decode jit_rvmprof_code
+ # call the 'jit_rvmprof_code(1)' for rvmprof, but then
+ # continue popping frames. Decode the 'rvmprof_code' insn
# manually here.
from rpython.rlib.rvmprof import cintf
arg1 = self.registers_i[ord(code[position + 1])]
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -74,8 +74,6 @@
self.parent_snapshot = None
# counter for unrolling inlined loops
self.unroll_iterations = 1
- # rvmprof
- self.rvmprof_unique_id = -1
@specialize.arg(3)
def copy_constants(self, registers, constants, ConstClass):
@@ -2080,8 +2078,8 @@
frame.pc = target
raise ChangeFrame
if opcode == self.staticdata.op_rvmprof_code:
- # do the 'jit_rvmprof_code(1)' for rvmprof, but then
- # continue popping frames. Decode jit_rvmprof_code
+ # call the 'jit_rvmprof_code(1)' for rvmprof, but then
+ # continue popping frames. Decode the 'rvmprof_code' insn
# manually here.
from rpython.rlib.rvmprof import cintf
arg1 = frame.registers_i[ord(code[position + 1])].getint()
From pypy.commits at gmail.com Sun Aug 7 15:25:09 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 12:25:09 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: translation fix
Message-ID: <57a78b15.915c1c0a.c121.a7f1@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86069:690fd43896cd
Date: 2016-08-07 20:33 +0100
http://bitbucket.org/pypy/pypy/changeset/690fd43896cd/
Log: translation fix
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -184,6 +184,8 @@
def decorated_function(*args):
unique_id = get_code_fn(*args)._vmprof_unique_id
+ unique_id = rffi.cast(lltype.Signed, unique_id)
+ # ^^^ removes the "known non-negative" hint for annotation
if not jit.we_are_jitted():
x = enter_code(unique_id)
try:
From pypy.commits at gmail.com Sun Aug 7 16:24:39 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 07 Aug 2016 13:24:39 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-realloc: close branch to be merged
Message-ID: <57a79907.44ce1c0a.a84b7.bf47@mx.google.com>
Author: Matti Picus
Branch: cpyext-realloc
Changeset: r86072:965b80b2b7c3
Date: 2016-08-07 23:21 +0300
http://bitbucket.org/pypy/pypy/changeset/965b80b2b7c3/
Log: close branch to be merged
From pypy.commits at gmail.com Sun Aug 7 16:24:41 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 07 Aug 2016 13:24:41 -0700 (PDT)
Subject: [pypy-commit] pypy default: merge cpyext-realloc which implements
PyObject_Realloc
Message-ID: <57a79909.81a2c20a.51926.589b@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r86073:4c630a5bbbdb
Date: 2016-08-07 23:22 +0300
http://bitbucket.org/pypy/pypy/changeset/4c630a5bbbdb/
Log: merge cpyext-realloc which implements PyObject_Realloc
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -21,6 +21,8 @@
flavor='raw',
add_memory_pressure=True)
+realloc = rffi.llexternal('realloc', [rffi.VOIDP, rffi.SIZE_T], rffi.VOIDP)
+
@cpython_api([rffi.VOIDP, size_t], rffi.VOIDP)
def PyObject_Realloc(space, ptr, size):
if not lltype.cast_ptr_to_int(ptr):
@@ -28,7 +30,7 @@
flavor='raw',
add_memory_pressure=True)
# XXX FIXME
- return lltype.nullptr(rffi.VOIDP.TO)
+ return realloc(ptr, size)
@cpython_api([rffi.VOIDP], lltype.Void)
def PyObject_Free(space, ptr):
diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py
--- a/pypy/module/cpyext/test/test_object.py
+++ b/pypy/module/cpyext/test/test_object.py
@@ -235,8 +235,9 @@
assert type(x) is int
assert x == -424344
- @pytest.mark.skipif(True, reason='realloc not fully implemented')
def test_object_realloc(self):
+ if not self.runappdirect:
+ skip('no untranslated support for realloc')
module = self.import_extension('foo', [
("realloctest", "METH_NOARGS",
"""
From pypy.commits at gmail.com Sun Aug 7 16:24:35 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 07 Aug 2016 13:24:35 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-realloc: merge default into branch
Message-ID: <57a79903.469d1c0a.9fd69.bbb4@mx.google.com>
Author: Matti Picus
Branch: cpyext-realloc
Changeset: r86070:a6addb94dc29
Date: 2016-08-07 23:15 +0300
http://bitbucket.org/pypy/pypy/changeset/a6addb94dc29/
Log: merge default into branch
diff too long, truncating to 2000 out of 2979 lines
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.7.0
+Version: 1.8.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.7.0"
-__version_info__ = (1, 7, 0)
+__version__ = "1.8.0"
+__version_info__ = (1, 8, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -42,7 +42,9 @@
# include
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
- typedef unsigned char _Bool;
+# ifndef __cplusplus
+ typedef unsigned char _Bool;
+# endif
# endif
#else
# include
@@ -59,7 +61,7 @@
#ifdef __cplusplus
# ifndef _Bool
-# define _Bool bool /* semi-hackish: C++ has no _Bool; bool is builtin */
+ typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
@@ -196,20 +198,6 @@
return NULL;
}
-_CFFI_UNUSED_FN
-static PyObject **_cffi_unpack_args(PyObject *args_tuple, Py_ssize_t expected,
- const char *fnname)
-{
- if (PyTuple_GET_SIZE(args_tuple) != expected) {
- PyErr_Format(PyExc_TypeError,
- "%.150s() takes exactly %zd arguments (%zd given)",
- fnname, expected, PyTuple_GET_SIZE(args_tuple));
- return NULL;
- }
- return &PyTuple_GET_ITEM(args_tuple, 0); /* pointer to the first item,
- the others follow */
-}
-
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.7.0"
+ "\ncompiled with cffi version: 1.8.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -519,12 +519,10 @@
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
- import warnings
- warnings.warn("%r has no values explicitly defined; next version "
- "will refuse to guess which integer type it is "
- "meant to be (unsigned/signed, int/long)"
- % self._get_c_name())
- smallest_value = largest_value = 0
+ raise api.CDefError("%r has no values explicitly defined: "
+ "refusing to guess which integer type it is "
+ "meant to be (unsigned/signed, int/long)"
+ % self._get_c_name())
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -275,6 +275,8 @@
def write_c_source_to_f(self, f, preamble):
self._f = f
prnt = self._prnt
+ if self.ffi._embedding is None:
+ prnt('#define Py_LIMITED_API')
#
# first the '#include' (actually done by inlining the file's content)
lines = self._rel_readlines('_cffi_include.h')
@@ -683,13 +685,11 @@
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
- prnt(' PyObject **aa;')
prnt()
- prnt(' aa = _cffi_unpack_args(args, %d, "%s");' % (len(rng), name))
- prnt(' if (aa == NULL)')
+ prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
+ name, len(rng), len(rng),
+ ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
- for i in rng:
- prnt(' arg%d = aa[%d];' % (i, i))
prnt()
#
for i, type in enumerate(tp.args):
@@ -862,6 +862,8 @@
enumfields = list(tp.enumfields())
for fldname, fldtype, fbitsize, fqual in enumfields:
fldtype = self._field_type(tp, fldname, fldtype)
+ self._check_not_opaque(fldtype,
+ "field '%s.%s'" % (tp.name, fldname))
# cname is None for _add_missing_struct_unions() only
op = OP_NOOP
if fbitsize >= 0:
@@ -911,6 +913,13 @@
first_field_index, c_fields))
self._seen_struct_unions.add(tp)
+ def _check_not_opaque(self, tp, location):
+ while isinstance(tp, model.ArrayType):
+ tp = tp.item
+ if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
+ raise TypeError(
+ "%s is of an opaque type (not declared in cdef())" % location)
+
def _add_missing_struct_unions(self):
# not very nice, but some struct declarations might be missing
# because they don't have any known C name. Check that they are
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -99,17 +99,24 @@
The garbage collectors used or implemented by PyPy are not based on
reference counting, so the objects are not freed instantly when they are no
-longer reachable. The most obvious effect of this is that files are not
+longer reachable. The most obvious effect of this is that files (and sockets, etc) are not
promptly closed when they go out of scope. For files that are opened for
writing, data can be left sitting in their output buffers for a while, making
the on-disk file appear empty or truncated. Moreover, you might reach your
OS's limit on the number of concurrently opened files.
-Fixing this is essentially impossible without forcing a
+If you are debugging a case where a file in your program is not closed
+properly, you can use the ``-X track-resources`` command line option. If it is
+given, a ``ResourceWarning`` is produced for every file and socket that the
+garbage collector closes. The warning will contain the stack trace of the
+position where the file or socket was created, to make it easier to see which
+parts of the program don't close files explicitly.
+
+Fixing this difference to CPython is essentially impossible without forcing a
reference-counting approach to garbage collection. The effect that you
get in CPython has clearly been described as a side-effect of the
implementation and not a language design decision: programs relying on
-this are basically bogus. It would anyway be insane to try to enforce
+this are basically bogus. It would be a too strong restriction to try to enforce
CPython's behavior in a language spec, given that it has no chance to be
adopted by Jython or IronPython (or any other port of Python to Java or
.NET).
@@ -134,7 +141,7 @@
Here are some more technical details. This issue affects the precise
time at which ``__del__`` methods are called, which
-is not reliable in PyPy (nor Jython nor IronPython). It also means that
+is not reliable or timely in PyPy (nor Jython nor IronPython). It also means that
**weak references** may stay alive for a bit longer than expected. This
makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
useful: they will appear to stay alive for a bit longer in PyPy, and
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -14,10 +14,9 @@
Defaults to 1/2 of your cache or ``4M``.
Small values (like 1 or 1KB) are useful for debugging.
-``PYPY_GC_NURSERY_CLEANUP``
- The interval at which nursery is cleaned up. Must
- be smaller than the nursery size and bigger than the
- biggest object we can allocate in the nursery.
+``PYPY_GC_NURSERY_DEBUG``
+ If set to non-zero, will fill nursery with garbage, to help
+ debugging.
``PYPY_GC_INCREMENT_STEP``
The size of memory marked during the marking step. Default is size of
@@ -62,3 +61,8 @@
use.
Values are ``0`` (off), ``1`` (on major collections) or ``2`` (also
on minor collections).
+
+``PYPY_GC_MAX_PINNED``
+ The maximal number of pinned objects at any point in time. Defaults
+ to a conservative value depending on nursery size and maximum object
+ size inside the nursery. Useful for debugging by setting it to 0.
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -2,6 +2,9 @@
pypy
======
+.. note: this is turned into a regular man page "pypy.1" by
+ doing "make man" in pypy/doc/
+
SYNOPSIS
========
@@ -48,6 +51,10 @@
-B
Disable writing bytecode (``.pyc``) files.
+-X track-resources
+ Produce a ``ResourceWarning`` whenever a file or socket is closed by the
+ garbage collector.
+
--version
Print the PyPy version.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -109,3 +109,18 @@
.. branch: jitlog-exact-source-lines
Log exact line positions in debug merge points.
+
+.. branch: null_byte_after_str
+
+Allocate all RPython strings with one extra byte, normally unused.
+It is used to hold a final zero in case we need some ``char *``
+representation of the string, together with checks like ``not
+can_move()`` or object pinning. Main new thing that this allows:
+``ffi.from_buffer(string)`` in CFFI. Additionally, and most
+importantly, CFFI calls that take directly a string as argument don't
+copy the string any more---this is like CFFI on CPython.
+
+.. branch: resource_warning
+
+Add a new command line option -X track-resources which will produce
+ResourceWarnings when the GC closes unclosed files and sockets.
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -24,11 +24,15 @@
-V : print the Python version number and exit (also --version)
-W arg : warning control; arg is action:message:category:module:lineno
also PYTHONWARNINGS=arg
+-X arg : set implementation-specific option
file : program read from script file
- : program read from stdin (default; interactive mode if a tty)
arg ...: arguments passed to program in sys.argv[1:]
+
PyPy options and arguments:
--info : print translation information about this PyPy executable
+-X track-resources : track the creation of files and sockets and display
+ a warning if they are not closed explicitly
"""
# Missing vs CPython: PYTHONHOME, PYTHONCASEOK
USAGE2 = """
@@ -229,6 +233,14 @@
import pypyjit
pypyjit.set_param(jitparam)
+def set_runtime_options(options, Xparam, *args):
+ if Xparam == 'track-resources':
+ sys.pypy_set_track_resources(True)
+ else:
+ print >> sys.stderr, 'usage: %s -X [options]' % (get_sys_executable(),)
+ print >> sys.stderr, '[options] can be: track-resources'
+ raise SystemExit
+
class CommandLineError(Exception):
pass
@@ -404,6 +416,7 @@
'--info': (print_info, None),
'--jit': (set_jit_option, Ellipsis),
'-funroll-loops': (funroll_loops, None),
+ '-X': (set_runtime_options, Ellipsis),
'--': (end_options, None),
}
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -108,8 +108,15 @@
return getattr(space, name)(operand)
return do_fold
-def _fold_pow(space, left, right):
- return space.pow(left, right, space.w_None)
+def _fold_pow(space, w_left, w_right):
+ # don't constant-fold if "w_left" and "w_right" are integers and
+ # the estimated bit length of the power is unreasonably large
+ space.appexec([w_left, w_right], """(left, right):
+ if isinstance(left, (int, long)) and isinstance(right, (int, long)):
+ if left.bit_length() * right > 5000:
+ raise OverflowError
+ """)
+ return space.pow(w_left, w_right, space.w_None)
def _fold_not(space, operand):
return space.wrap(not space.is_true(operand))
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1156,3 +1156,22 @@
counts = self.count_instructions(source)
assert ops.BUILD_SET not in counts
assert ops.LOAD_CONST in counts
+
+ def test_dont_fold_huge_powers(self):
+ for source in (
+ "2 ** 3000", # not constant-folded: too big
+ "(-2) ** 3000",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER in counts
+
+ for source in (
+ "2 ** 2000", # constant-folded
+ "2 ** -3000",
+ "1.001 ** 3000",
+ "1 ** 3000.0",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER not in counts
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1764,6 +1764,40 @@
_warnings.warn(msg, warningcls, stacklevel=stacklevel)
""")
+ def resource_warning(self, w_msg, w_tb):
+ self.appexec([w_msg, w_tb],
+ """(msg, tb):
+ import sys
+ print >> sys.stderr, msg
+ if tb:
+ print >> sys.stderr, "Created at (most recent call last):"
+ print >> sys.stderr, tb
+ """)
+
+ def format_traceback(self):
+ # we need to disable track_resources before calling the traceback
+ # module. Else, it tries to open more files to format the traceback,
+ # the file constructor will call space.format_traceback etc., in an
+ # infinite recursion
+ flag = self.sys.track_resources
+ self.sys.track_resources = False
+ try:
+ return self.appexec([],
+ """():
+ import sys, traceback
+ # the "1" is because we don't want to show THIS code
+ # object in the traceback
+ try:
+ f = sys._getframe(1)
+ except ValueError:
+ # this happens if you call format_traceback at the very beginning
+ # of startup, when there is no bottom code object
+ return ''
+ return "".join(traceback.format_stack(f))
+ """)
+ finally:
+ self.sys.track_resources = flag
+
class AppExecCache(SpaceCache):
def build(cache, source):
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -220,6 +220,13 @@
expected = {"no_user_site": True}
self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass', **expected)
+ def test_track_resources(self, monkeypatch):
+ myflag = [False]
+ def pypy_set_track_resources(flag):
+ myflag[0] = flag
+ monkeypatch.setattr(sys, 'pypy_set_track_resources', pypy_set_track_resources, raising=False)
+ self.check(['-X', 'track-resources'], {}, sys_argv=[''], run_stdin=True)
+ assert myflag[0] == True
class TestInteraction:
"""
@@ -1074,4 +1081,3 @@
# assert it did not crash
finally:
sys.path[:] = old_sys_path
-
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -427,3 +427,28 @@
space.finish()
# assert that we reach this point without getting interrupted
# by the OperationError(NameError)
+
+ def test_format_traceback(self):
+ from pypy.tool.pytest.objspace import maketestobjspace
+ from pypy.interpreter.gateway import interp2app
+ #
+ def format_traceback(space):
+ return space.format_traceback()
+ #
+ space = maketestobjspace()
+ w_format_traceback = space.wrap(interp2app(format_traceback))
+ w_tb = space.appexec([w_format_traceback], """(format_traceback):
+ def foo():
+ return bar()
+ def bar():
+ return format_traceback()
+ return foo()
+ """)
+ tb = space.str_w(w_tb)
+ expected = '\n'.join([
+ ' File "?", line 6, in anonymous', # this is the appexec code object
+ ' File "?", line 3, in foo',
+ ' File "?", line 5, in bar',
+ ''
+ ])
+ assert tb == expected
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi, entrypoint
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.7.0"
+VERSION = "1.8.0"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -157,11 +157,13 @@
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
try:
+ keepalives = [None] * len(args_w) # None or strings
for i in range(len(args_w)):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
w_obj = args_w[i]
argtype = self.fargs[i]
- if argtype.convert_argument_from_object(data, w_obj):
+ if argtype.convert_argument_from_object(data, w_obj,
+ keepalives, i):
# argtype is a pointer type, and w_obj a list/tuple/str
mustfree_max_plus_1 = i + 1
@@ -177,9 +179,13 @@
if isinstance(argtype, W_CTypePointer):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
flag = get_mustfree_flag(data)
+ raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
if flag == 1:
- raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
lltype.free(raw_cdata, flavor='raw')
+ elif flag >= 4:
+ value = keepalives[i]
+ assert value is not None
+ rffi.free_nonmovingbuffer(value, raw_cdata, chr(flag))
lltype.free(buffer, flavor='raw')
keepalive_until_here(args_w)
return w_res
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -83,7 +83,7 @@
raise oefmt(space.w_TypeError, "cannot initialize cdata '%s'",
self.name)
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
self.convert_from_object(cdata, w_ob)
return False
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -14,8 +14,8 @@
class W_CTypePtrOrArray(W_CType):
- _attrs_ = ['ctitem', 'can_cast_anything', 'length']
- _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length']
+ _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
+ _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
length = -1
def __init__(self, space, size, extra, extra_position, ctitem,
@@ -28,6 +28,9 @@
# - for functions, it is the return type
self.ctitem = ctitem
self.can_cast_anything = could_cast_anything and ctitem.cast_anything
+ self.accept_str = (self.can_cast_anything or
+ (ctitem.is_primitive_integer and
+ ctitem.size == rffi.sizeof(lltype.Char)))
def is_unichar_ptr_or_array(self):
return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar)
@@ -70,9 +73,7 @@
pass
else:
self._convert_array_from_listview(cdata, space.listview(w_ob))
- elif (self.can_cast_anything or
- (self.ctitem.is_primitive_integer and
- self.ctitem.size == rffi.sizeof(lltype.Char))):
+ elif self.accept_str:
if not space.isinstance_w(w_ob, space.w_str):
raise self._convert_error("str or list or tuple", w_ob)
s = space.str_w(w_ob)
@@ -260,8 +261,16 @@
else:
return lltype.nullptr(rffi.CCHARP.TO)
- def _prepare_pointer_call_argument(self, w_init, cdata):
+ def _prepare_pointer_call_argument(self, w_init, cdata, keepalives, i):
space = self.space
+ if self.accept_str and space.isinstance_w(w_init, space.w_str):
+ # special case to optimize strings passed to a "char *" argument
+ value = w_init.str_w(space)
+ keepalives[i] = value
+ buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
+ rffi.cast(rffi.CCHARPP, cdata)[0] = buf
+ return ord(buf_flag) # 4, 5 or 6
+ #
if (space.isinstance_w(w_init, space.w_list) or
space.isinstance_w(w_init, space.w_tuple)):
length = space.int_w(space.len(w_init))
@@ -297,10 +306,11 @@
rffi.cast(rffi.CCHARPP, cdata)[0] = result
return 1
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag
result = (not isinstance(w_ob, cdataobj.W_CData) and
- self._prepare_pointer_call_argument(w_ob, cdata))
+ self._prepare_pointer_call_argument(w_ob, cdata,
+ keepalives, i))
if result == 0:
self.convert_from_object(cdata, w_ob)
set_mustfree_flag(cdata, result)
diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py
--- a/pypy/module/_cffi_backend/ffi_obj.py
+++ b/pypy/module/_cffi_backend/ffi_obj.py
@@ -353,7 +353,7 @@
'array.array' or numpy arrays."""
#
w_ctchara = newtype._new_chara_type(self.space)
- return func.from_buffer(self.space, w_ctchara, w_python_buffer)
+ return func._from_buffer(self.space, w_ctchara, w_python_buffer)
@unwrap_spec(w_arg=W_CData)
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -1,7 +1,8 @@
from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw
-from rpython.rlib.objectmodel import keepalive_until_here
+from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated
+from rpython.rlib import jit
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
@@ -132,17 +133,66 @@
raise oefmt(space.w_TypeError,
"needs 'char[]', got '%s'", w_ctype.name)
#
+ return _from_buffer(space, w_ctype, w_x)
+
+def _from_buffer(space, w_ctype, w_x):
buf = _fetch_as_read_buffer(space, w_x)
- try:
- _cdata = buf.get_raw_address()
- except ValueError:
- raise oefmt(space.w_TypeError,
- "from_buffer() got a '%T' object, which supports the "
- "buffer interface but cannot be rendered as a plain "
- "raw address on PyPy", w_x)
+ if space.isinstance_w(w_x, space.w_str):
+ _cdata = get_raw_address_of_string(space, w_x)
+ else:
+ try:
+ _cdata = buf.get_raw_address()
+ except ValueError:
+ raise oefmt(space.w_TypeError,
+ "from_buffer() got a '%T' object, which supports the "
+ "buffer interface but cannot be rendered as a plain "
+ "raw address on PyPy", w_x)
#
return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x)
+# ____________________________________________________________
+
+class RawBytes(object):
+ def __init__(self, string):
+ self.ptr = rffi.str2charp(string, track_allocation=False)
+ def __del__(self):
+ rffi.free_charp(self.ptr, track_allocation=False)
+
+class RawBytesCache(object):
+ def __init__(self, space):
+ from pypy.interpreter.baseobjspace import W_Root
+ from rpython.rlib import rweakref
+ self.wdict = rweakref.RWeakKeyDictionary(W_Root, RawBytes)
+
+ at jit.dont_look_inside
+def get_raw_address_of_string(space, w_x):
+ """Special case for ffi.from_buffer(string). Returns a 'char *' that
+ is valid as long as the string object is alive. Two calls to
+ ffi.from_buffer(same_string) are guaranteed to return the same pointer.
+ """
+ from rpython.rtyper.annlowlevel import llstr
+ from rpython.rtyper.lltypesystem.rstr import STR
+ from rpython.rtyper.lltypesystem import llmemory
+ from rpython.rlib import rgc
+
+ cache = space.fromcache(RawBytesCache)
+ rawbytes = cache.wdict.get(w_x)
+ if rawbytes is None:
+ data = space.str_w(w_x)
+ if we_are_translated() and not rgc.can_move(data):
+ lldata = llstr(data)
+ data_start = (llmemory.cast_ptr_to_adr(lldata) +
+ rffi.offsetof(STR, 'chars') +
+ llmemory.itemoffsetof(STR.chars, 0))
+ data_start = rffi.cast(rffi.CCHARP, data_start)
+ data_start[len(data)] = '\x00' # write the final extra null
+ return data_start
+ rawbytes = RawBytes(data)
+ cache.wdict.set(w_x, rawbytes)
+ return rawbytes.ptr
+
+# ____________________________________________________________
+
def unsafe_escaping_ptr_for_ptr_or_array(w_cdata):
if not w_cdata.ctype.is_nonfunc_pointer_or_array:
diff --git a/pypy/module/_cffi_backend/parse_c_type.py b/pypy/module/_cffi_backend/parse_c_type.py
--- a/pypy/module/_cffi_backend/parse_c_type.py
+++ b/pypy/module/_cffi_backend/parse_c_type.py
@@ -97,11 +97,8 @@
[rffi.INT], rffi.CCHARP)
def parse_c_type(info, input):
- p_input = rffi.str2charp(input)
- try:
+ with rffi.scoped_view_charp(input) as p_input:
res = ll_parse_c_type(info, p_input)
- finally:
- rffi.free_charp(p_input)
return rffi.cast(lltype.Signed, res)
NULL_CTX = lltype.nullptr(PCTX.TO)
@@ -130,15 +127,13 @@
return rffi.getintfield(src_ctx, 'c_num_types')
def search_in_globals(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_globals(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_globals(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
def search_in_struct_unions(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_struct_unions(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_struct_unions(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.7.0", ("This test_c.py file is for testing a version"
+assert __version__ == "1.8.0", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
@@ -3330,13 +3330,18 @@
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
- py.test.raises(TypeError, from_buffer, BCharA, b"foo")
+ p1 = from_buffer(BCharA, b"foo")
+ assert p1 == from_buffer(BCharA, b"foo")
+ import gc; gc.collect()
+ assert p1 == from_buffer(BCharA, b"foo")
py.test.raises(TypeError, from_buffer, BCharA, u+"foo")
try:
from __builtin__ import buffer
except ImportError:
pass
else:
+ # from_buffer(buffer(b"foo")) does not work, because it's not
+ # implemented on pypy; only from_buffer(b"foo") works.
py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo"))
py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo"))
try:
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -38,23 +38,33 @@
errors = None
fd = -1
cffi_fileobj = None # pypy/module/_cffi_backend
+ w_tb = None # String representation of the traceback at creation time
newlines = 0 # Updated when the stream is closed
def __init__(self, space):
self.space = space
self.register_finalizer(space)
+ if self.space.sys.track_resources:
+ self.w_tb = self.space.format_traceback()
def _finalize_(self):
# assume that the file and stream objects are only visible in the
# thread that runs _finalize_, so no race condition should be
# possible and no locking is done here.
- if self.stream is not None:
- try:
- self.direct_close()
- except StreamErrors as e:
- operr = wrap_streamerror(self.space, e, self.w_name)
- raise operr
+ if self.stream is None:
+ return
+ if self.space.sys.track_resources:
+ w_repr = self.space.repr(self)
+ str_repr = self.space.str_w(w_repr)
+ w_msg = self.space.wrap("WARNING: unclosed file: " + str_repr)
+ self.space.resource_warning(w_msg, self.w_tb)
+ #
+ try:
+ self.direct_close()
+ except StreamErrors as e:
+ operr = wrap_streamerror(self.space, e, self.w_name)
+ raise operr
def fdopenstream(self, stream, fd, mode, w_name=None):
self.fd = fd
diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py
--- a/pypy/module/_file/test/test_file.py
+++ b/pypy/module/_file/test/test_file.py
@@ -1,5 +1,6 @@
from __future__ import with_statement
-import py, os, errno
+import pytest, os, errno
+from pypy.interpreter.gateway import interp2app, unwrap_spec
def getfile(space):
return space.appexec([], """():
@@ -10,13 +11,24 @@
return file
""")
+# the following function is used e.g. in test_resource_warning
+ at unwrap_spec(regex=str, s=str)
+def regex_search(space, regex, s):
+ import re
+ import textwrap
+ regex = textwrap.dedent(regex).strip()
+ m = re.search(regex, s)
+ m = bool(m)
+ return space.wrap(m)
+
class AppTestFile(object):
spaceconfig = dict(usemodules=("_file",))
def setup_class(cls):
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("foo.txt")))
+ str(pytest.ensuretemp("fileimpl").join("foo.txt")))
cls.w_file = getfile(cls.space)
+ cls.w_regex_search = cls.space.wrap(interp2app(regex_search))
def test_simple(self):
f = self.file(self.temppath, "w")
@@ -206,6 +218,9 @@
assert exc.value.filename == os.curdir
def test_encoding_errors(self):
+ import sys
+ if '__pypy__' not in sys.builtin_module_names:
+ pytest.skip("pypy only test")
import _file
with self.file(self.temppath, "w") as f:
@@ -254,6 +269,71 @@
if '__pypy__' in sys.builtin_module_names:
assert repr(self.temppath) in g.getvalue()
+ @pytest.mark.skipif("config.option.runappdirect")
+ def test_track_resources(self):
+ import os, gc, sys, cStringIO
+ if '__pypy__' not in sys.builtin_module_names:
+ skip("pypy specific test")
+ def fn(flag1, flag2, do_close=False):
+ sys.pypy_set_track_resources(flag1)
+ f = self.file(self.temppath, 'w')
+ sys.pypy_set_track_resources(flag2)
+ buf = cStringIO.StringIO()
+ preverr = sys.stderr
+ try:
+ sys.stderr = buf
+ if do_close:
+ f.close()
+ del f
+ gc.collect() # force __del__ to be called
+ finally:
+ sys.stderr = preverr
+ sys.pypy_set_track_resources(False)
+ return buf.getvalue()
+
+ # check with track_resources disabled
+ assert fn(False, False) == ""
+ #
+ # check that we don't get the warning if we actually close the file
+ assert fn(False, False, do_close=True) == ""
+ #
+ # check with track_resources enabled
+ msg = fn(True, True)
+ assert self.regex_search(r"""
+ WARNING: unclosed file:
+ Created at \(most recent call last\):
+ File ".*", line .*, in test_track_resources
+ File ".*", line .*, in fn
+ """, msg)
+ #
+ # check with track_resources enabled in the destructor BUT with a
+ # file which was created when track_resources was disabled
+ msg = fn(False, True)
+ assert self.regex_search("WARNING: unclosed file: ", msg)
+ assert "Created at" not in msg
+
+ @pytest.mark.skipif("config.option.runappdirect")
+ def test_track_resources_dont_crash(self):
+ import os, gc, sys, cStringIO
+ if '__pypy__' not in sys.builtin_module_names:
+ skip("pypy specific test")
+ #
+ # try hard to create a code object whose co_filename points to an
+ # EXISTING file, so that traceback.py tries to open it when formatting
+ # the stacktrace
+ f = open(self.temppath, 'w')
+ f.close()
+ co = compile('open("%s")' % self.temppath, self.temppath, 'exec')
+ sys.pypy_set_track_resources(True)
+ try:
+ # this exec used to fail, because space.format_traceback tried to
+ # recursively open a file, causing an infinite recursion. For the
+ # purpose of this test, it is enough that it actually finishes
+ # without errors
+ exec co
+ finally:
+ sys.pypy_set_track_resources(False)
+
def test_truncate(self):
f = self.file(self.temppath, "w")
f.write("foo")
@@ -313,7 +393,7 @@
cls.old_read = os.read
if cls.runappdirect:
- py.test.skip("works with internals of _file impl on py.py")
+ pytest.skip("works with internals of _file impl on py.py")
def read(fd, n=None):
if fd != 424242:
return cls.old_read(fd, n)
@@ -352,9 +432,9 @@
def setup_class(cls):
if not cls.runappdirect:
- py.test.skip("likely to deadlock when interpreted by py.py")
+ pytest.skip("likely to deadlock when interpreted by py.py")
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("concurrency.txt")))
+ str(pytest.ensuretemp("fileimpl").join("concurrency.txt")))
cls.w_file = getfile(cls.space)
def test_concurrent_writes(self):
@@ -465,7 +545,7 @@
def setup_class(cls):
cls.w_temppath = cls.space.wrap(
- str(py.test.ensuretemp("fileimpl").join("foo.txt")))
+ str(pytest.ensuretemp("fileimpl").join("foo.txt")))
cls.w_file = getfile(cls.space)
def test___enter__(self):
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -401,21 +401,20 @@
_WriteFile, ERROR_NO_SYSTEM_RESOURCES)
from rpython.rlib import rwin32
- charp = rffi.str2charp(buf)
- written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
- flavor='raw')
- try:
- result = _WriteFile(
- self.handle, rffi.ptradd(charp, offset),
- size, written_ptr, rffi.NULL)
+ with rffi.scoped_view_charp(buf) as charp:
+ written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
+ flavor='raw')
+ try:
+ result = _WriteFile(
+ self.handle, rffi.ptradd(charp, offset),
+ size, written_ptr, rffi.NULL)
- if (result == 0 and
- rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
- raise oefmt(space.w_ValueError,
- "Cannot send %d bytes over connection", size)
- finally:
- rffi.free_charp(charp)
- lltype.free(written_ptr, flavor='raw')
+ if (result == 0 and
+ rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
+ raise oefmt(space.w_ValueError,
+ "Cannot send %d bytes over connection", size)
+ finally:
+ lltype.free(written_ptr, flavor='raw')
def do_recv_string(self, space, buflength, maxlength):
from pypy.module._multiprocessing.interp_win32 import (
diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py
--- a/pypy/module/_rawffi/alt/interp_funcptr.py
+++ b/pypy/module/_rawffi/alt/interp_funcptr.py
@@ -20,7 +20,8 @@
def _getfunc(space, CDLL, w_name, w_argtypes, w_restype):
argtypes_w, argtypes, w_restype, restype = unpack_argtypes(
space, w_argtypes, w_restype)
- if space.isinstance_w(w_name, space.w_str):
+ if (space.isinstance_w(w_name, space.w_str) or
+ space.isinstance_w(w_name, space.w_unicode)):
name = space.str_w(w_name)
try:
func = CDLL.cdll.getpointer(name, argtypes, restype,
diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py
--- a/pypy/module/_socket/interp_socket.py
+++ b/pypy/module/_socket/interp_socket.py
@@ -151,9 +151,23 @@
class W_Socket(W_Root):
+ w_tb = None # String representation of the traceback at creation time
+
def __init__(self, space, sock):
+ self.space = space
self.sock = sock
register_socket(space, sock)
+ if self.space.sys.track_resources:
+ self.w_tb = self.space.format_traceback()
+ self.register_finalizer(space)
+
+ def _finalize_(self):
+ is_open = self.sock.fd >= 0
+ if is_open and self.space.sys.track_resources:
+ w_repr = self.space.repr(self)
+ str_repr = self.space.str_w(w_repr)
+ w_msg = self.space.wrap("WARNING: unclosed " + str_repr)
+ self.space.resource_warning(w_msg, self.w_tb)
def get_type_w(self, space):
return space.wrap(self.sock.type)
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -1,6 +1,8 @@
import sys, os
-import py
+import pytest
from pypy.tool.pytest.objspace import gettestobjspace
+from pypy.interpreter.gateway import interp2app
+from pypy.module._file.test.test_file import regex_search
from rpython.tool.udir import udir
from rpython.rlib import rsocket
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -12,8 +14,6 @@
mod.w_socket = space.appexec([], "(): import _socket as m; return m")
mod.path = udir.join('fd')
mod.path.write('fo')
- mod.raises = py.test.raises # make raises available from app-level tests
- mod.skip = py.test.skip
def test_gethostname():
host = space.appexec([w_socket], "(_socket): return _socket.gethostname()")
@@ -41,7 +41,7 @@
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and not ipv6:
from pypy.interpreter.error import OperationError
- with py.test.raises(OperationError):
+ with pytest.raises(OperationError):
space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyaddr(host)")
continue
@@ -57,14 +57,14 @@
assert space.unwrap(port) == 25
# 1 arg version
if sys.version_info < (2, 4):
- py.test.skip("getservbyname second argument is not optional before python 2.4")
+ pytest.skip("getservbyname second argument is not optional before python 2.4")
port = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getservbyname(name)")
assert space.unwrap(port) == 25
def test_getservbyport():
if sys.version_info < (2, 4):
- py.test.skip("getservbyport does not exist before python 2.4")
+ pytest.skip("getservbyport does not exist before python 2.4")
port = 25
# 2 args version
name = space.appexec([w_socket, space.wrap(port)],
@@ -97,7 +97,7 @@
def test_fromfd():
# XXX review
if not hasattr(socket, 'fromfd'):
- py.test.skip("No socket.fromfd on this platform")
+ pytest.skip("No socket.fromfd on this platform")
orig_fd = path.open()
fd = space.appexec([w_socket, space.wrap(orig_fd.fileno()),
space.wrap(socket.AF_INET), space.wrap(socket.SOCK_STREAM),
@@ -157,7 +157,7 @@
def test_pton_ntop_ipv4():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
tests = [
("123.45.67.89", "\x7b\x2d\x43\x59"),
("0.0.0.0", "\x00" * 4),
@@ -173,9 +173,9 @@
def test_ntop_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -194,9 +194,9 @@
def test_pton_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -215,7 +215,7 @@
assert space.unwrap(w_packed) == packed
def test_has_ipv6():
- py.test.skip("has_ipv6 is always True on PyPy for now")
+ pytest.skip("has_ipv6 is always True on PyPy for now")
res = space.appexec([w_socket], "(_socket): return _socket.has_ipv6")
assert space.unwrap(res) == socket.has_ipv6
@@ -229,7 +229,7 @@
w_l = space.appexec([w_socket, space.wrap(host), space.wrap(port)],
"(_socket, host, port): return _socket.getaddrinfo(host, long(port))")
assert space.unwrap(w_l) == info
- py.test.skip("Unicode conversion is too slow")
+ pytest.skip("Unicode conversion is too slow")
w_l = space.appexec([w_socket, space.wrap(unicode(host)), space.wrap(port)],
"(_socket, host, port): return _socket.getaddrinfo(host, port)")
assert space.unwrap(w_l) == info
@@ -250,7 +250,7 @@
def test_addr_raw_packet():
from pypy.module._socket.interp_socket import addr_as_object
if not hasattr(rsocket._c, 'sockaddr_ll'):
- py.test.skip("posix specific test")
+ pytest.skip("posix specific test")
# HACK: To get the correct interface number of lo, which in most cases is 1,
# but can be anything (i.e. 39), we need to call the libc function
# if_nametoindex to get the correct index
@@ -314,6 +314,7 @@
def setup_class(cls):
cls.space = space
cls.w_udir = space.wrap(str(udir))
+ cls.w_regex_search = space.wrap(interp2app(regex_search))
def teardown_class(cls):
if not cls.runappdirect:
@@ -402,6 +403,64 @@
if os.name != 'nt':
raises(OSError, os.close, fileno)
+ def test_socket_track_resources(self):
+ import _socket, os, gc, sys, cStringIO
+ s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
+ fileno = s.fileno()
+ assert s.fileno() >= 0
+ s.close()
+ assert s.fileno() < 0
+ s.close()
+ if os.name != 'nt':
+ raises(OSError, os.close, fileno)
+
+ @pytest.mark.skipif("config.option.runappdirect")
+ def test_track_resources(self):
+ import os, gc, sys, cStringIO
+ import _socket
+ if '__pypy__' not in sys.builtin_module_names:
+ skip("pypy specific test")
+ #
+ def fn(flag1, flag2, do_close=False):
+ sys.pypy_set_track_resources(flag1)
+ mysock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
+ sys.pypy_set_track_resources(flag2)
+ buf = cStringIO.StringIO()
+ preverr = sys.stderr
+ try:
+ sys.stderr = buf
+ if do_close:
+ mysock.close()
+ del mysock
+ gc.collect() # force __del__ to be called
+ finally:
+ sys.stderr = preverr
+ sys.pypy_set_track_resources(False)
+ return buf.getvalue()
+
+ # check with track_resources disabled
+ assert fn(False, False) == ""
+ #
+ # check that we don't get the warning if we actually closed the socket
+ msg = fn(True, True, do_close=True)
+ assert msg == ''
+ #
+ # check with track_resources enabled
+ msg = fn(True, True)
+ assert self.regex_search(r"""
+ WARNING: unclosed
+ Created at \(most recent call last\):
+ File ".*", line .*, in test_track_resources
+ File ".*", line .*, in fn
+ """, msg)
+ #
+ # track_resources is enabled after the construction of the socket. in
+ # this case, the socket is not registered for finalization at all, so
+ # we don't see a message
+ msg = fn(False, True)
+ assert msg == ''
+
+
def test_socket_close_error(self):
import _socket, os
if os.name == 'nt':
@@ -630,11 +689,11 @@
class AppTestNetlink:
def setup_class(cls):
if not hasattr(os, 'getpid'):
- py.test.skip("AF_NETLINK needs os.getpid()")
+ pytest.skip("AF_NETLINK needs os.getpid()")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_NETLINK')")
if not space.is_true(w_ok):
- py.test.skip("no AF_NETLINK on this platform")
+ pytest.skip("no AF_NETLINK on this platform")
cls.space = space
def test_connect_to_kernel_netlink_routing_socket(self):
@@ -650,11 +709,11 @@
class AppTestPacket:
def setup_class(cls):
if not hasattr(os, 'getuid') or os.getuid() != 0:
- py.test.skip("AF_PACKET needs to be root for testing")
+ pytest.skip("AF_PACKET needs to be root for testing")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_PACKET')")
if not space.is_true(w_ok):
- py.test.skip("no AF_PACKET on this platform")
+ pytest.skip("no AF_PACKET on this platform")
cls.space = space
def test_convert_between_tuple_and_sockaddr_ll(self):
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -135,7 +135,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
NPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
# set both server and client callbacks, because the context
@@ -147,7 +147,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def advertiseNPN_cb(s, data_ptr, len_ptr, args):
@@ -181,7 +181,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
ALPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
with rffi.scoped_str2charp(protos) as protos_buf:
@@ -193,7 +193,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def selectALPN_cb(s, out_ptr, outlen_ptr, client, client_len, args):
@@ -228,7 +228,7 @@
Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
bound on the entropy contained in string."""
- with rffi.scoped_str2charp(string) as buf:
+ with rffi.scoped_nonmovingbuffer(string) as buf:
libssl_RAND_add(buf, len(string), entropy)
def RAND_status(space):
diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py
--- a/pypy/module/cppyy/capi/builtin_capi.py
+++ b/pypy/module/cppyy/capi/builtin_capi.py
@@ -537,9 +537,8 @@
releasegil=ts_helper,
compilation_info=backend.eci)
def c_charp2stdstring(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2stdstring(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2stdstring(charp)
return result
_c_stdstring2stdstring = rffi.llexternal(
"cppyy_stdstring2stdstring",
diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py
--- a/pypy/module/cppyy/capi/cint_capi.py
+++ b/pypy/module/cppyy/capi/cint_capi.py
@@ -82,9 +82,8 @@
releasegil=ts_helper,
compilation_info=eci)
def c_charp2TString(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2TString(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2TString(charp)
return result
_c_TString2TString = rffi.llexternal(
"cppyy_TString2TString",
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -65,6 +65,7 @@
else: # only other use is sring
n = len(obj._string)
assert raw_string == rffi.cast(rffi.CCHARP, 0)
+ # XXX could use rffi.get_nonmovingbuffer_final_null()
raw_string = rffi.str2charp(obj._string)
data = rffi.cast(rffi.CCHARPP, data)
data[0] = raw_string
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -11,6 +11,9 @@
from rpython.rtyper.annlowlevel import llhelper
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
from rpython.rlib.objectmodel import dont_inline
+from rpython.rlib.rfile import (FILEP, c_fread, c_fclose, c_fwrite,
+ c_fdopen, c_fileno,
+ c_fopen)# for tests
from rpython.translator import cdir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.gensupp import NameManager
@@ -85,44 +88,32 @@
assert CONST_WSTRING == rffi.CWCHARP
# FILE* interface
-FILEP = rffi.COpaquePtr('FILE')
if sys.platform == 'win32':
dash = '_'
else:
dash = ''
-fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT)
-fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
-fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING],
- FILEP, save_err=rffi.RFFI_SAVE_ERRNO)
-_fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
def fclose(fp):
- if not is_valid_fd(fileno(fp)):
+ if not is_valid_fd(c_fileno(fp)):
return -1
- return _fclose(fp)
+ return c_fclose(fp)
-_fwrite = rffi.llexternal('fwrite',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fwrite(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fwrite(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fwrite(buf, sz, n, fp)
-_fread = rffi.llexternal('fread',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fread(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fread(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fread(buf, sz, n, fp)
_feof = rffi.llexternal('feof', [FILEP], rffi.INT)
def feof(fp):
- validate_fd(fileno(fp))
+ validate_fd(c_fileno(fp))
return _feof(fp)
def is_valid_fp(fp):
- return is_valid_fd(fileno(fp))
+ return is_valid_fd(c_fileno(fp))
pypy_decl = 'pypy_decl.h'
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -96,7 +96,8 @@
raise oefmt(space.w_ValueError,
"bytes_attach called on object with ob_size %d but trying to store %d",
py_str.c_ob_size, len(s))
- rffi.c_memcpy(py_str.c_ob_sval, rffi.str2charp(s), len(s))
+ with rffi.scoped_nonmovingbuffer(s) as s_ptr:
+ rffi.c_memcpy(py_str.c_ob_sval, s_ptr, len(s))
py_str.c_ob_sval[len(s)] = '\0'
py_str.c_ob_shash = space.hash_w(w_obj)
py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py
--- a/pypy/module/cpyext/pyfile.py
+++ b/pypy/module/cpyext/pyfile.py
@@ -1,6 +1,7 @@
from rpython.rtyper.lltypesystem import rffi, lltype
+from rpython.rlib.rfile import c_setvbuf, _IONBF
from pypy.module.cpyext.api import (
- cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers, fdopen)
+ cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers, c_fdopen)
from pypy.module.cpyext.pyobject import PyObject
from pypy.module.cpyext.object import Py_PRINT_RAW
from pypy.interpreter.error import (OperationError, oefmt,
@@ -64,11 +65,12 @@
if (fd < 0 or not mode or mode[0] not in ['r', 'w', 'a', 'U'] or
('U' in mode and ('w' in mode or 'a' in mode))):
raise oefmt(space.w_IOError, 'invalid fileno or mode')
- ret = fdopen(fd, mode)
+ ret = c_fdopen(fd, mode)
if not ret:
raise exception_from_saved_errno(space, space.w_IOError)
+ # XXX fix this once use-file-star-for-file lands
+ c_setvbuf(ret, lltype.nullptr(rffi.CCHARP.TO), _IONBF, 0)
return ret
-
@cpython_api([FILEP, CONST_STRING, CONST_STRING, rffi.VOIDP], PyObject)
def PyFile_FromFile(space, fp, name, mode, close):
diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py
--- a/pypy/module/cpyext/test/test_eval.py
+++ b/pypy/module/cpyext/test/test_eval.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.eval import (
Py_single_input, Py_file_input, Py_eval_input, PyCompilerFlags)
-from pypy.module.cpyext.api import fopen, fclose, fileno, Py_ssize_tP
+from pypy.module.cpyext.api import c_fopen, c_fclose, c_fileno, Py_ssize_tP
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.astcompiler import consts
from rpython.tool.udir import udir
@@ -130,19 +130,19 @@
def test_run_file(self, space, api):
filepath = udir / "cpyext_test_runfile.py"
filepath.write("raise ZeroDivisionError")
- fp = fopen(str(filepath), "rb")
+ fp = c_fopen(str(filepath), "rb")
filename = rffi.str2charp(str(filepath))
w_globals = w_locals = space.newdict()
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_ZeroDivisionError
api.PyErr_Clear()
# try again, but with a closed file
- fp = fopen(str(filepath), "rb")
- os.close(fileno(fp))
+ fp = c_fopen(str(filepath), "rb")
+ os.close(c_fileno(fp))
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_IOError
api.PyErr_Clear()
diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py
--- a/pypy/module/cpyext/test/test_pyfile.py
+++ b/pypy/module/cpyext/test/test_pyfile.py
@@ -1,5 +1,4 @@
from pypy.conftest import option
-from pypy.module.cpyext.api import fopen, fclose, fwrite
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.object import Py_PRINT_RAW
@@ -133,6 +132,15 @@
return PyLong_FromLong(0);
return PyLong_FromLong(ftell(fp));
"""),
+ ("read_10", "METH_O",
+ """
+ char s[10];
+ FILE * fp = PyFile_AsFile(args);
+ if (fp == NULL)
+ return PyLong_FromLong(0);
+ fread(s, 1, 10, fp);
+ return PyLong_FromLong(ftell(fp));
+ """),
])
filename = self.udir + "/_test_file"
with open(filename, 'w') as fid:
@@ -142,5 +150,12 @@
t_py = fid.tell()
assert t_py == 80
t_c = module.get_c_tell(fid)
- assert t_c == t_py
+ assert t_c == t_py
+ print '-------- tell ',t_c
+ t_c = module.read_10(fid)
+ assert t_c == t_py + 10
+ print '-------- tell ',t_c
+ t_py = fid.tell()
+ assert t_c == t_py, 'after a fread, c level ftell(fp) %d but PyFile.tell() %d' % (t_c, t_py)
+
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -20,6 +20,7 @@
self.defaultencoding = "ascii"
self.filesystemencoding = None
self.debug = True
+ self.track_resources = False
self.dlopenflags = rdynload._dlopen_default_mode()
interpleveldefs = {
@@ -55,6 +56,8 @@
'_current_frames' : 'currentframes._current_frames',
'setrecursionlimit' : 'vm.setrecursionlimit',
'getrecursionlimit' : 'vm.getrecursionlimit',
+ 'pypy_set_track_resources' : 'vm.set_track_resources',
+ 'pypy_get_track_resources' : 'vm.get_track_resources',
'setcheckinterval' : 'vm.setcheckinterval',
'getcheckinterval' : 'vm.getcheckinterval',
'exc_info' : 'vm.exc_info',
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -61,6 +61,13 @@
"""
return space.wrap(space.sys.recursionlimit)
+ at unwrap_spec(flag=bool)
+def set_track_resources(space, flag):
+ space.sys.track_resources = flag
+
+def get_track_resources(space):
+ return space.wrap(space.sys.track_resources)
+
@unwrap_spec(interval=int)
def setcheckinterval(space, interval):
"""Tell the Python interpreter to check for asynchronous events every
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
@@ -130,7 +130,7 @@
cls.module = str(udir.join('testownlib.dll'))
else:
subprocess.check_call(
- 'gcc testownlib.c -shared -fPIC -o testownlib.so',
+ 'cc testownlib.c -shared -fPIC -o testownlib.so',
cwd=str(udir), shell=True)
cls.module = str(udir.join('testownlib.so'))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -852,9 +852,12 @@
assert str(e2.value) == "foo0() takes no arguments (2 given)"
assert str(e3.value) == "foo1() takes exactly one argument (0 given)"
assert str(e4.value) == "foo1() takes exactly one argument (2 given)"
- assert str(e5.value) == "foo2() takes exactly 2 arguments (0 given)"
- assert str(e6.value) == "foo2() takes exactly 2 arguments (1 given)"
- assert str(e7.value) == "foo2() takes exactly 2 arguments (3 given)"
+ assert str(e5.value) in ["foo2 expected 2 arguments, got 0",
+ "foo2() takes exactly 2 arguments (0 given)"]
+ assert str(e6.value) in ["foo2 expected 2 arguments, got 1",
+ "foo2() takes exactly 2 arguments (1 given)"]
+ assert str(e7.value) in ["foo2 expected 2 arguments, got 3",
+ "foo2() takes exactly 2 arguments (3 given)"]
def test_address_of_function():
ffi = FFI()
@@ -1916,3 +1919,47 @@
ffi.cdef("bool f(void);")
lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
assert lib.f() == 1
+
+def test_bool_in_cpp_2():
+ ffi = FFI()
+ ffi.cdef('int add(int a, int b);')
+ lib = verify(ffi, "test_bool_bug_cpp", '''
+ typedef bool _Bool; /* there is a Windows header with this line */
+ int add(int a, int b)
+ {
+ return a + b;
+ }''', source_extension='.cpp')
+ c = lib.add(2, 3)
+ assert c == 5
+
+def test_struct_field_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[2]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+
+def test_function_arg_opaque():
+ py.test.skip("can currently declare a function with an opaque struct "
+ "as argument, but AFAICT it's impossible to call it later")
+
+def test_function_returns_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a foo(int);")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_function_returns_opaque", "?")
+ assert str(e.value) == ("function foo: 'struct a' is used as result type,"
+ " but is opaque")
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
@@ -133,6 +133,12 @@
# You cannot assing character format codes as restype any longer
raises(TypeError, setattr, f, "restype", "i")
+ def test_unicode_function_name(self):
+ f = dll[u'_testfunc_i_bhilfd']
+ f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
+ f.restype = c_int
+ result = f(1, 2, 3, 4, 5.0, 6.0)
+ assert result == 21
def test_truncate_python_longs(self):
f = dll._testfunc_i_bhilfd
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -428,4 +428,5 @@
FakeObjSpace.sys.filesystemencoding = 'foobar'
FakeObjSpace.sys.defaultencoding = 'ascii'
FakeObjSpace.sys.dlopenflags = 123
+FakeObjSpace.sys.track_resources = False
FakeObjSpace.builtin = FakeModule()
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-# hypothesis is used for test generation on untranslated jit tests
+# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
--- a/rpython/jit/backend/arm/opassembler.py
+++ b/rpython/jit/backend/arm/opassembler.py
@@ -883,6 +883,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self._gen_address(resloc, baseloc, ofsloc, scale, ofs_items)
diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py
--- a/rpython/jit/backend/llsupport/descr.py
+++ b/rpython/jit/backend/llsupport/descr.py
@@ -280,7 +280,7 @@
concrete_type = '\x00'
def __init__(self, basesize, itemsize, lendescr, flag, is_pure=False, concrete_type='\x00'):
- self.basesize = basesize
+ self.basesize = basesize # this includes +1 for STR
self.itemsize = itemsize
self.lendescr = lendescr # or None, if no length
self.flag = flag
@@ -676,7 +676,7 @@
def unpack_arraydescr(arraydescr):
assert isinstance(arraydescr, ArrayDescr)
- ofs = arraydescr.basesize
+ ofs = arraydescr.basesize # this includes +1 for STR
size = arraydescr.itemsize
sign = arraydescr.is_item_signed()
return size, ofs, sign
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -293,6 +293,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1),
itemsize, itemsize, basesize, NOT_SIGNED)
elif opnum == rop.UNICODEGETITEM:
@@ -304,6 +305,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2),
itemsize, itemsize, basesize)
elif opnum == rop.UNICODESETITEM:
diff --git a/rpython/jit/backend/llsupport/symbolic.py b/rpython/jit/backend/llsupport/symbolic.py
--- a/rpython/jit/backend/llsupport/symbolic.py
+++ b/rpython/jit/backend/llsupport/symbolic.py
@@ -29,7 +29,7 @@
def get_array_token(T, translate_support_code):
# T can be an array or a var-sized structure
if translate_support_code:
- basesize = llmemory.sizeof(T, 0)
+ basesize = llmemory.sizeof(T, 0) # this includes +1 for STR
if isinstance(T, lltype.Struct):
SUBARRAY = getattr(T, T._arrayfld)
itemsize = llmemory.sizeof(SUBARRAY.OF)
@@ -57,6 +57,7 @@
assert carray.length.size == WORD
ofs_length = before_array_part + carray.length.offset
basesize = before_array_part + carray.items.offset
+ basesize += T._hints.get('extra_item_after_alloc', 0) # +1 for STR
carrayitem = ll2ctypes.get_ctypes_type(T.OF)
itemsize = ctypes.sizeof(carrayitem)
return basesize, itemsize, ofs_length
diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py
--- a/rpython/jit/backend/llsupport/test/test_descr.py
+++ b/rpython/jit/backend/llsupport/test/test_descr.py
@@ -435,8 +435,10 @@
def test_bytearray_descr():
c0 = GcCache(False)
descr = get_array_descr(c0, rstr.STR) # for bytearray
+ # note that we get a basesize that has 1 extra byte for the final null char
+ # (only for STR)
assert descr.flag == FLAG_UNSIGNED
- assert descr.basesize == struct.calcsize("PP") # hash, length
+ assert descr.basesize == struct.calcsize("PP") + 1 # hash, length, extra
assert descr.lendescr.offset == struct.calcsize("P") # hash
assert not descr.is_array_of_pointers()
diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py
--- a/rpython/jit/backend/llsupport/test/test_rewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_rewrite.py
@@ -647,6 +647,9 @@
""")
def test_rewrite_assembler_newstr_newunicode(self):
+ # note: strdescr.basesize already contains the extra final character,
+ # so that's why newstr(14) is rounded up to 'basesize+15' and not
+ # 'basesize+16'.
self.check_rewrite("""
[i2]
p0 = newstr(14)
@@ -657,12 +660,12 @@
""", """
[i2]
p0 = call_malloc_nursery( \
- %(strdescr.basesize + 16 * strdescr.itemsize + \
+ %(strdescr.basesize + 15 * strdescr.itemsize + \
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s)
gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s)
gc_store(p0, 0, 0, %(strhashdescr.field_size)s)
- p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
+ p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 15 * strdescr.itemsize)d)
gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s)
gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s)
gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s)
@@ -1240,14 +1243,14 @@
# 'i3 = gc_load_i(p0,i5,%(unicodedescr.itemsize)d)'],
[True, (4,), 'i3 = strgetitem(p0,i1)' '->'
'i3 = gc_load_indexed_i(p0,i1,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
#[False, (4,), 'i3 = strgetitem(p0,i1)' '->'
- # 'i5 = int_add(i1, %(strdescr.basesize)d);'
+ # 'i5 = int_add(i1, %(strdescr.basesize-1)d);'
# 'i3 = gc_load_i(p0,i5,1)'],
## setitem str/unicode
[True, (4,), 'i3 = strsetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
[True, (2,4), 'i3 = unicodesetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,'
'%(unicodedescr.itemsize)d,'
diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py
--- a/rpython/jit/backend/llsupport/test/ztranslation_test.py
+++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py
@@ -3,7 +3,7 @@
from rpython.rlib.jit import JitDriver, unroll_parameters, set_param
from rpython.rlib.jit import PARAMETERS, dont_look_inside
from rpython.rlib.jit import promote, _get_virtualizable_token
-from rpython.rlib import jit_hooks, rposix
+from rpython.rlib import jit_hooks, rposix, rgc
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField
from rpython.jit.backend.detect_cpu import getcpuclass
@@ -11,7 +11,7 @@
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.config.config import ConfigError
from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rlib.rjitlog import rjitlog as jl
@@ -29,6 +29,7 @@
# - floats neg and abs
# - cast_int_to_float
# - llexternal with macro=True
+ # - extra place for the zero after STR instances
class BasicFrame(object):
_virtualizable_ = ['i']
@@ -56,7 +57,7 @@
return ("/home.py",0,0)
jitdriver = JitDriver(greens = [],
- reds = ['total', 'frame', 'j'],
+ reds = ['total', 'frame', 'prev_s', 'j'],
virtualizables = ['frame'],
get_location = get_location)
def f(i, j):
@@ -68,9 +69,12 @@
total = 0
frame = Frame(i)
j = float(j)
+ prev_s = rstr.mallocstr(16)
while frame.i > 3:
- jitdriver.can_enter_jit(frame=frame, total=total, j=j)
- jitdriver.jit_merge_point(frame=frame, total=total, j=j)
+ jitdriver.can_enter_jit(frame=frame, total=total, j=j,
+ prev_s=prev_s)
+ jitdriver.jit_merge_point(frame=frame, total=total, j=j,
+ prev_s=prev_s)
_get_virtualizable_token(frame)
total += frame.i
if frame.i >= 20:
@@ -82,6 +86,11 @@
k = myabs1(myabs2(j))
if k - abs(j): raise ValueError
if k - abs(-j): raise ValueError
+ s = rstr.mallocstr(16)
+ rgc.ll_write_final_null_char(s)
+ rgc.ll_write_final_null_char(prev_s)
+ if (frame.i & 3) == 0:
+ prev_s = s
return chr(total % 253)
#
class Virt2(object):
diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py
--- a/rpython/jit/backend/ppc/opassembler.py
+++ b/rpython/jit/backend/ppc/opassembler.py
@@ -994,6 +994,7 @@
basesize, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
scale = 0
self._emit_load_for_copycontent(r.r0, src_ptr_loc, src_ofs_loc, scale)
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -2,7 +2,8 @@
from rpython.rlib import jit
from rpython.rtyper.annlowlevel import llhelper
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib.rvmprof import cintf
+from rpython.rlib.rvmprof import cintf, vmprof_execute_code, register_code,\
+ register_code_object_class, _get_vmprof
from rpython.jit.backend.x86.arch import WORD
from rpython.jit.codewriter.policy import JitPolicy
@@ -14,6 +15,7 @@
def helper():
stack = cintf.vmprof_tl_stack.getraw()
+ print stack
if stack:
# not during tracing
visited.append(stack.c_value)
@@ -22,15 +24,34 @@
llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
- driver = jit.JitDriver(greens=[], reds='auto')
+ driver = jit.JitDriver(greens=['code'], reds='auto')
- def f(n):
+ class CodeObj(object):
+ pass
+
+ def get_code_fn(code, arg):
+ return code
+
+ def get_name(code):
+ return "foo"
+
+ register_code_object_class(CodeObj, get_name)
+
+ @vmprof_execute_code("main", get_code_fn)
+ def f(code, n):
i = 0
while i < n:
- driver.jit_merge_point()
+ driver.jit_merge_point(code=code)
i += 1
llfn()
+ def main(n):
+ cintf.vmprof_tl_stack.setraw(null) # make it empty
+ vmprof = _get_vmprof()
+ code = CodeObj()
+ register_code(code, get_name)
+ return f(code, n)
+
class Hooks(jit.JitHookInterface):
def after_compile(self, debug_info):
self.raw_start = debug_info.asminfo.rawstart
@@ -38,12 +59,12 @@
hooks = Hooks()
null = lltype.nullptr(cintf.VMPROFSTACK)
- cintf.vmprof_tl_stack.setraw(null) # make it empty
- self.meta_interp(f, [10], policy=JitPolicy(hooks))
- v = set(visited)
- assert 0 in v
- v.remove(0)
- assert len(v) == 1
- assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
- assert cintf.vmprof_tl_stack.getraw() == null
+ self.meta_interp(main, [10], policy=JitPolicy(hooks))
+ print visited
+ #v = set(visited)
+ #assert 0 in v
+ #v.remove(0)
+ #assert len(v) == 1
+ #assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
+ #assert cintf.vmprof_tl_stack.getraw() == null
# ^^^ make sure we didn't leave anything dangling
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1673,25 +1673,6 @@
dest_addr = AddressLoc(base_loc, ofs_loc, scale, offset_loc.value)
self.save_into_mem(dest_addr, value_loc, size_loc)
- def genop_discard_strsetitem(self, op, arglocs):
- base_loc, ofs_loc, val_loc = arglocs
- basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR,
- self.cpu.translate_support_code)
- assert itemsize == 1
- dest_addr = AddressLoc(base_loc, ofs_loc, 0, basesize)
- self.mc.MOV8(dest_addr, val_loc.lowest8bits())
-
- def genop_discard_unicodesetitem(self, op, arglocs):
- base_loc, ofs_loc, val_loc = arglocs
- basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
- self.cpu.translate_support_code)
- if itemsize == 4:
- self.mc.MOV32(AddressLoc(base_loc, ofs_loc, 2, basesize), val_loc)
- elif itemsize == 2:
- self.mc.MOV16(AddressLoc(base_loc, ofs_loc, 1, basesize), val_loc)
- else:
- assert 0, itemsize
-
# genop_discard_setfield_raw = genop_discard_setfield_gc
def genop_math_read_timestamp(self, op, arglocs, resloc):
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -1219,6 +1219,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self.assembler.load_effective_addr(ofsloc, ofs_items, scale,
resloc, baseloc)
diff --git a/rpython/jit/backend/x86/test/test_rvmprof.py b/rpython/jit/backend/x86/test/test_rvmprof.py
--- a/rpython/jit/backend/x86/test/test_rvmprof.py
+++ b/rpython/jit/backend/x86/test/test_rvmprof.py
@@ -3,5 +3,5 @@
from rpython.jit.backend.test.test_rvmprof import BaseRVMProfTest
from rpython.jit.backend.x86.test.test_basic import Jit386Mixin
-class TestFfiCall(Jit386Mixin, BaseRVMProfTest):
- pass
\ No newline at end of file
+class TestRVMProfCall(Jit386Mixin, BaseRVMProfTest):
+ pass
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -991,6 +991,7 @@
basesize, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
From pypy.commits at gmail.com Sun Aug 7 16:24:37 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 07 Aug 2016 13:24:37 -0700 (PDT)
Subject: [pypy-commit] pypy cpyext-realloc: skip test if untranslated
Message-ID: <57a79905.c15e1c0a.917d6.b83a@mx.google.com>
Author: Matti Picus
Branch: cpyext-realloc
Changeset: r86071:6d1586833c45
Date: 2016-08-07 23:20 +0300
http://bitbucket.org/pypy/pypy/changeset/6d1586833c45/
Log: skip test if untranslated
diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py
--- a/pypy/module/cpyext/test/test_object.py
+++ b/pypy/module/cpyext/test/test_object.py
@@ -236,6 +236,8 @@
assert x == -424344
def test_object_realloc(self):
+ if not self.runappdirect:
+ skip('no untranslated support for realloc')
module = self.import_extension('foo', [
("realloctest", "METH_NOARGS",
"""
From pypy.commits at gmail.com Sun Aug 7 16:24:43 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 07 Aug 2016 13:24:43 -0700 (PDT)
Subject: [pypy-commit] pypy default: document merged branch
Message-ID: <57a7990b.497bc20a.273c8.08ae@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r86074:ebfe94bde5ab
Date: 2016-08-07 23:23 +0300
http://bitbucket.org/pypy/pypy/changeset/ebfe94bde5ab/
Log: document merged branch
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -124,3 +124,7 @@
Add a new command line option -X track-resources which will produce
ResourceWarnings when the GC closes unclosed files and sockets.
+
+.. branch: cpyext-realloc
+
+Implement PyObject_Realloc
From pypy.commits at gmail.com Mon Aug 8 02:54:41 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 07 Aug 2016 23:54:41 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: Improve the test,
fix by adding another missing case
Message-ID: <57a82cb1.031dc20a.af26a.a020@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86075:d58a138573e9
Date: 2016-08-08 08:54 +0200
http://bitbucket.org/pypy/pypy/changeset/d58a138573e9/
Log: Improve the test, fix by adding another missing case
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -139,7 +139,8 @@
else:
llfn()
c -= 1
- jit.promote(c + 5) # failing guard
+ if c & 1: # a failing guard
+ pass
raise MyExc(c)
def main(n):
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -1458,6 +1458,20 @@
from rpython.rlib.rvmprof import cintf
cintf.jit_rvmprof_code(leaving, box_unique_id.getint())
+ def handle_rvmprof_enter_on_resume(self):
+ code = self.bytecode
+ position = self.pc
+ opcode = ord(code[position])
+ if opcode == self.metainterp.staticdata.op_rvmprof_code:
+ arg1 = self.registers_i[ord(code[position + 1])].getint()
+ arg2 = self.registers_i[ord(code[position + 2])].getint()
+ if arg1 == 1:
+ # we are resuming at a position that will do a
+ # jit_rvmprof_code(1), when really executed. That's a
+ # hint for the need for a jit_rvmprof_code(0).
+ from rpython.rlib.rvmprof import cintf
+ cintf.jit_rvmprof_code(0, arg2)
+
# ------------------------------
def setup_call(self, argboxes):
diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py
--- a/rpython/jit/metainterp/resume.py
+++ b/rpython/jit/metainterp/resume.py
@@ -1058,6 +1058,7 @@
f.setup_resume_at_op(pc)
resumereader.consume_boxes(f.get_current_position_info(),
f.registers_i, f.registers_r, f.registers_f)
+ f.handle_rvmprof_enter_on_resume()
return resumereader.liveboxes, virtualizable_boxes, virtualref_boxes
From pypy.commits at gmail.com Mon Aug 8 07:35:24 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 08 Aug 2016 04:35:24 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: added unpack/pack test
stressing the operation
Message-ID: <57a86e7c.c4ebc20a.86c3a.12c8@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86076:b6f69665e955
Date: 2016-08-08 13:34 +0200
http://bitbucket.org/pypy/pypy/changeset/b6f69665e955/
Log: added unpack/pack test stressing the operation
diff --git a/rpython/jit/backend/ppc/vector_ext.py b/rpython/jit/backend/ppc/vector_ext.py
--- a/rpython/jit/backend/ppc/vector_ext.py
+++ b/rpython/jit/backend/ppc/vector_ext.py
@@ -92,83 +92,35 @@
self.VEC_DOUBLE_WORD_ONES = mem
def emit_vec_load_f(self, op, arglocs, regalloc):
- resloc, baseloc, indexloc, size_loc, ofs, integer_loc, aligned_loc = arglocs
+ resloc, baseloc, indexloc, size_loc, ofs, integer_loc = arglocs
indexloc = self._apply_offset(indexloc, ofs)
itemsize = size_loc.value
- if itemsize == 4:
+ if integer_loc.value:
+ self.mc.lxvd2x(resloc.value, indexloc.value, baseloc.value)
+ elif itemsize == 4:
self.mc.lxvw4x(resloc.value, indexloc.value, baseloc.value)
elif itemsize == 8:
self.mc.lxvd2x(resloc.value, indexloc.value, baseloc.value)
+ else:
+ not_implemented("vec_load_f itemsize %d" % itemsize)
- def emit_vec_load_i(self, op, arglocs, regalloc):
- resloc, baseloc, indexloc, size_loc, ofs, \
- Vhiloc, Vloloc, Vploc, tloc = arglocs
- indexloc = self._apply_offset(indexloc, ofs)
- Vlo = Vloloc.value
- Vhi = Vhiloc.value
- self.mc.lvx(Vhi, indexloc.value, baseloc.value)
- Vp = Vploc.value
- t = tloc.value
- if IS_BIG_ENDIAN:
- self.mc.lvsl(Vp, indexloc.value, baseloc.value)
- else:
- self.mc.lvsr(Vp, indexloc.value, baseloc.value)
- self.mc.addi(t, baseloc.value, 16)
- self.mc.lvx(Vlo, indexloc.value, t)
- if IS_BIG_ENDIAN:
- self.mc.vperm(resloc.value, Vhi, Vlo, Vp)
- else:
- self.mc.vperm(resloc.value, Vlo, Vhi, Vp)
+ emit_vec_load_i = emit_vec_load_f
def emit_vec_store(self, op, arglocs, regalloc):
baseloc, indexloc, valueloc, sizeloc, baseofs, \
- integer_loc, aligned_loc = arglocs
+ integer_loc = arglocs
indexloc = self._apply_offset(indexloc, baseofs)
assert baseofs.value == 0
if integer_loc.value:
- Vloloc = regalloc.vrm.get_scratch_reg(type=INT)
- Vhiloc = regalloc.vrm.get_scratch_reg(type=INT)
- Vploc = regalloc.vrm.get_scratch_reg(type=INT)
- tloc = regalloc.rm.get_scratch_reg()
- V1sloc = regalloc.vrm.get_scratch_reg(type=INT)
- V1s = V1sloc.value
- V0sloc = regalloc.vrm.get_scratch_reg(type=INT)
- V0s = V0sloc.value
- Vmaskloc = regalloc.vrm.get_scratch_reg(type=INT)
- Vmask = Vmaskloc.value
- Vlo = Vhiloc.value
- Vhi = Vloloc.value
- Vp = Vploc.value
- t = tloc.value
- Vs = valueloc.value
- # UFF, that is a lot of code for storing unaligned!
- # probably a lot of room for improvement (not locally,
- # but in general for the algorithm)
- self.mc.lvx(Vhi, indexloc.value, baseloc.value)
- #self.mc.lvsr(Vp, indexloc.value, baseloc.value)
- if IS_BIG_ENDIAN:
- self.mc.lvsr(Vp, indexloc.value, baseloc.value)
- else:
- self.mc.lvsl(Vp, indexloc.value, baseloc.value)
- self.mc.addi(t, baseloc.value, 16)
- self.mc.lvx(Vlo, indexloc.value, t)
- self.mc.vspltisb(V1s, -1)
- self.mc.vspltisb(V0s, 0)
- if IS_BIG_ENDIAN:
- self.mc.vperm(Vmask, V0s, V1s, Vp)
- else:
- self.mc.vperm(Vmask, V1s, V0s, Vp)
- self.mc.vperm(Vs, Vs, Vs, Vp)
- self.mc.vsel(Vlo, Vs, Vlo, Vmask)
- self.mc.vsel(Vhi, Vhi, Vs, Vmask)
- self.mc.stvx(Vlo, indexloc.value, t)
- self.mc.stvx(Vhi, indexloc.value, baseloc.value)
+ self.mc.stxvd2x(valueloc.value, indexloc.value, baseloc.value)
else:
itemsize = sizeloc.value
if itemsize == 4:
self.mc.stxvw4x(valueloc.value, indexloc.value, baseloc.value)
elif itemsize == 8:
self.mc.stxvd2x(valueloc.value, indexloc.value, baseloc.value)
+ else:
+ not_implemented("vec_store itemsize %d" % itemsize)
def emit_vec_int_add(self, op, arglocs, regalloc):
resloc, loc0, loc1, size_loc = arglocs
@@ -631,7 +583,6 @@
not descr.is_array_of_structs()
itemsize, ofs, _ = unpack_arraydescr(descr)
integer = not (descr.is_array_of_floats() or descr.getconcrete_type() == FLOAT)
- aligned = False
args = op.getarglist()
a0 = op.getarg(0)
a1 = op.getarg(1)
@@ -639,28 +590,9 @@
ofs_loc = self.ensure_reg(a1)
result_loc = self.force_allocate_vector_reg(op)
return [result_loc, base_loc, ofs_loc, imm(itemsize), imm(ofs),
- imm(integer), imm(aligned)]
+ imm(integer)]
- def _prepare_load_i(self, op):
- descr = op.getdescr()
- assert isinstance(descr, ArrayDescr)
- assert not descr.is_array_of_pointers() and \
- not descr.is_array_of_structs()
- itemsize, ofs, _ = unpack_arraydescr(descr)
- args = op.getarglist()
- a0 = op.getarg(0)
- a1 = op.getarg(1)
- base_loc = self.ensure_reg(a0)
- ofs_loc = self.ensure_reg(a1)
- result_loc = self.force_allocate_vector_reg(op)
- tloc = self.rm.get_scratch_reg()
- Vhiloc = self.vrm.get_scratch_reg(type=INT)
- Vloloc = self.vrm.get_scratch_reg(type=INT)
- Vploc = self.vrm.get_scratch_reg(type=INT)
- return [result_loc, base_loc, ofs_loc, imm(itemsize), imm(ofs),
- Vhiloc, Vloloc, Vploc, tloc]
-
- prepare_vec_load_i = _prepare_load_i
+ prepare_vec_load_i = _prepare_load
prepare_vec_load_f = _prepare_load
def prepare_vec_arith(self, op):
@@ -720,9 +652,8 @@
valueloc = self.ensure_vector_reg(a2)
integer = not (descr.is_array_of_floats() or descr.getconcrete_type() == FLOAT)
- aligned = False
return [baseloc, ofsloc, valueloc,
- imm(itemsize), imm(ofs), imm(integer), imm(aligned)]
+ imm(itemsize), imm(ofs), imm(integer)]
def prepare_vec_int_signext(self, op):
assert isinstance(op, VectorOp)
diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py
--- a/rpython/jit/backend/x86/vector_ext.py
+++ b/rpython/jit/backend/x86/vector_ext.py
@@ -531,6 +531,8 @@
self.mc.SHUFPD_xxi(resloc.value, resloc.value, 1)
self.mc.UNPCKHPD(resloc, srcloc)
# if they are equal nothing is to be done
+ else:
+ not_implemented("pack/unpack for size %d", size)
genop_vec_unpack_f = genop_vec_pack_f
diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py
--- a/rpython/jit/metainterp/test/test_vector.py
+++ b/rpython/jit/metainterp/test/test_vector.py
@@ -18,6 +18,13 @@
from rpython.rlib.objectmodel import (specialize, is_annotation_constant,
always_inline)
from rpython.jit.backend.detect_cpu import getcpuclass
+from rpython.jit.tool.oparser import parse
+from rpython.jit.metainterp.history import (AbstractFailDescr,
+ AbstractDescr,
+ BasicFailDescr, BasicFinalDescr,
+ JitCellToken, TargetToken,
+ ConstInt, ConstPtr,
+ Const, ConstFloat)
CPU = getcpuclass()
@@ -78,7 +85,6 @@
enable_opts = 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll'
def setup_method(self, method):
- import pdb; pdb.set_trace()
if not self.supports_vector_ext():
py.test.skip("this cpu %s has no implemented vector backend" % CPU)
@@ -718,5 +724,80 @@
res = self.meta_interp(f, [22], vec_all=True, vec_guard_ratio=5)
assert res == f(22)
+ def run_unpack(self, unpack, vector_type, assignments, float=True):
+ vars = {'v':0,'f':0,'i':0}
+ def newvar(type):
+ c = vars[type]
+ vars[type] = c + 1
+ if type == 'v':
+ return type + str(c) + vector_type
+ return type + str(c)
+ targettoken = TargetToken()
+ finaldescr = BasicFinalDescr(1)
+ args = []
+ args_values = []
+ pack = []
+ suffix = 'f' if float else 'i'
+ for var, vals in assignments.items():
+ v = newvar('v')
+ pack.append('%s = vec_%s()' % (v, suffix))
+ for i,val in enumerate(vals):
+ args_values.append(val)
+ f = newvar('f')
+ args.append(f)
+ count = 1
+ # create a new variable
+ vo = v
+ v = newvar('v')
+ pack.append('%s = vec_pack_%s(%s, %s, %d, %d)' % \
+ (v, suffix, vo, f, i, count))
+ vars['x'] = v
+ packs = '\n '.join(pack)
+ resvar = suffix + '{'+suffix+'}'
+ source = '''
+ [{args}]
+ label({args}, descr=targettoken)
+ {packs}
+ {unpack}
+ finish({resvar}, descr=finaldescr)
+ '''.format(args=','.join(args),packs=packs, unpack=unpack.format(**vars),
+ resvar=resvar.format(**vars))
+ loop = parse(source, namespace={'targettoken': targettoken,
+ 'finaldescr': finaldescr})
+
+ cpu = self.CPUClass(rtyper=None, stats=None)
+ cpu.setup_once()
+ #
+ looptoken = JitCellToken()
+ cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
+ deadframe = cpu.execute_token(looptoken, *args_values)
+ print(source)
+ if float:
+ return cpu.get_float_value(deadframe, 0)
+ else:
+ return cpu.get_int_value(deadframe, 0)
+
+ def test_unpack(self):
+ # double unpack
+ assert self.run_unpack("f{f} = vec_unpack_f({x}, 0, 1)",
+ "[2xf64]", {'x': (1.2,-1)}) == 1.2
+ assert self.run_unpack("f{f} = vec_unpack_f({x}, 1, 1)",
+ "[2xf64]", {'x': (50.33,4321.0)}) == 4321.0
+ # int64
+ assert self.run_unpack("i{i} = vec_unpack_i({x}, 0, 1)",
+ "[2xi64]", {'x': (11,12)}, float=False) == 11
+ assert self.run_unpack("i{i} = vec_unpack_i({x}, 1, 1)",
+ "[2xi64]", {'x': (14,15)}, float=False) == 15
+
+ ## integer unpack (byte)
+ for i in range(16):
+ op = "i{i} = vec_unpack_i({x}, %d, 1)" % i
+ assert self.run_unpack(op, "[16xi8]", {'x': [127,1]*8}, float=False) == (127 if i%2==0 else 1)
+ if i < 8:
+ assert self.run_unpack(op, "[2xi16]", {'x': [2**15-1,0]*4}, float=False) == (2**15-1 if i%2==0 else 0)
+ if i < 4:
+ assert self.run_unpack(op, "[2xi32]", {'x': [2**31-1,0]*4}, float=False) == (2**31-1 if i%2==0 else 0)
+
+
class TestLLtype(LLJitMixin, VectorizeTests):
pass
diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py
--- a/rpython/jit/tool/oparser.py
+++ b/rpython/jit/tool/oparser.py
@@ -299,6 +299,7 @@
vecinfo.datatype = match.group(3)
vecinfo.bytesize = int(match.group(4)) // 8
resop._vec_debug_info = vecinfo
+ resop.bytesize = vecinfo.bytesize
return var[:var.find('[')]
vecinfo = VectorizationInfo(resop)
From pypy.commits at gmail.com Mon Aug 8 12:01:34 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Mon, 08 Aug 2016 09:01:34 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Add test for asyncio checking a GIL
initialization error on running "await asyncio.open_connection"
Message-ID: <57a8acde.c41f1c0a.c04bb.4532@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86077:3e98b6f5791a
Date: 2016-08-08 18:00 +0200
http://bitbucket.org/pypy/pypy/changeset/3e98b6f5791a/
Log: Add test for asyncio checking a GIL initialization error on running
"await asyncio.open_connection"
diff --git a/pypy/module/_asyncio/test/test_asyncio.py b/pypy/module/_asyncio/test/test_asyncio.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_asyncio/test/test_asyncio.py
@@ -0,0 +1,17 @@
+class AppTestAsyncIO(object):
+
+ spaceconfig = dict(usemodules=["select","_socket","thread","signal","struct","_multiprocessing","array","_posixsubprocess","fcntl","unicodedata"])
+
+ def setup_class(cls):
+ cls.space.appexec([], """():
+ import encodings.idna
+ import asyncio
+ async def f():
+ reader, writer = await asyncio.open_connection('example.com', 80)
+
+ loop = asyncio.get_event_loop()
+ loop.run_until_complete(f())""")
+
+ def test_gil_issue(self):
+ #needed to execute setup_class in the first place
+ assert 1==1
\ No newline at end of file
From pypy.commits at gmail.com Mon Aug 8 12:48:02 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 08 Aug 2016 09:48:02 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: fix translation
Message-ID: <57a8b7c2.c3f0c20a.4a412.a6db@mx.google.com>
Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r86078:f12312dfba23
Date: 2016-06-30 19:35 +0200
http://bitbucket.org/pypy/pypy/changeset/f12312dfba23/
Log: fix translation
diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py
--- a/rpython/jit/metainterp/logger.py
+++ b/rpython/jit/metainterp/logger.py
@@ -81,6 +81,7 @@
debug_stop("jit-log-compiling-bridge")
else:
debug_start("jit-log-opt-bridge")
+ logops = None
if have_debug_prints():
print_after_inputargs = ''
debug_print("# bridge out of Guard",
From pypy.commits at gmail.com Mon Aug 8 12:48:05 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 08 Aug 2016 09:48:05 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: fix a bug around logging ops
Message-ID: <57a8b7c5.68adc20a.25dd3.a24e@mx.google.com>
Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r86079:464652309dcc
Date: 2016-06-30 19:35 +0200
http://bitbucket.org/pypy/pypy/changeset/464652309dcc/
Log: fix a bug around logging ops
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -1136,6 +1136,9 @@
assert isinstance(newdescr, GuardCompatibleDescr)
compat_cond = newdescr._compatibility_conditions
self.other_compat_conditions.append(compat_cond)
+ if not compat_cond:
+ assert self.fallback_jump_target == 0 # this can never happen twice
+ self.fallback_jump_target = -1
asminfo = ResumeGuardDescr.compile_and_attach(
self, metainterp, new_loop, orig_inputargs)
# note that the backend will not patch the switch at all, so it is
@@ -1143,7 +1146,7 @@
if compat_cond:
compat_cond.jump_target = asminfo.asmaddr
else:
- assert self.fallback_jump_target == 0 # this can never happen twice
+ assert self.fallback_jump_target == -1 # this can never happen twice
self.fallback_jump_target = asminfo.asmaddr
return asminfo
diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py
--- a/rpython/jit/metainterp/logger.py
+++ b/rpython/jit/metainterp/logger.py
@@ -89,7 +89,7 @@
"with", len(operations), "ops")
logops = self._make_log_operations(memo)
if isinstance(descr, GuardCompatibleDescr):
- if descr.fallback_jump_target == 0:
+ if descr.fallback_jump_target != -1:
# this means it's the last attached guard
ccond = descr.other_compat_conditions[-1]
argrepr = logops.repr_of_arg(
From pypy.commits at gmail.com Mon Aug 8 12:48:06 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 08 Aug 2016 09:48:06 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: since b116f09c4e9d we can no
longer clear the last_quasi_immut_field_op in the GuardCompatibleDescr
Message-ID: <57a8b7c6.12331c0a.bb1d6.5509@mx.google.com>
Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r86080:9e5474cdc047
Date: 2016-06-30 19:48 +0200
http://bitbucket.org/pypy/pypy/changeset/9e5474cdc047/
Log: since b116f09c4e9d we can no longer clear the
last_quasi_immut_field_op in the GuardCompatibleDescr
diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py
--- a/rpython/jit/metainterp/compatible.py
+++ b/rpython/jit/metainterp/compatible.py
@@ -138,7 +138,6 @@
if not qmutdescr.is_still_valid_for(self.known_valid):
return None, None
copied_op.setarg(2, qmutdescr.constantfieldbox)
- self.last_quasi_immut_field_op = None
return copied_op, QuasiimmutGetfieldAndPureCallCondition(
op, qmutdescr, optimizer)
diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py
--- a/rpython/jit/metainterp/test/test_compatible.py
+++ b/rpython/jit/metainterp/test/test_compatible.py
@@ -538,7 +538,6 @@
assert x < 30
self.check_trace_count(7)
-
def test_quasi_immutable_merge_short_preamble(self):
from rpython.rlib.objectmodel import we_are_translated
class C(object):
@@ -614,3 +613,64 @@
self.check_resops(call_i=0)
+ def test_like_objects(self):
+ from rpython.rlib.objectmodel import we_are_translated
+ class Map(object):
+ _immutable_fields_ = ['version?']
+
+ def __init__(self):
+ self.version = Version()
+ self.dct = {}
+
+ def instantiate(self):
+ return Obj(self)
+
+ @jit.elidable_compatible(quasi_immut_field_name_for_second_arg='version')
+ def lookup_version(self, version, name):
+ return self.dct.get(name, -1)
+
+ class Version(object):
+ pass
+
+ class Obj(object):
+ def __init__(self, map):
+ self.map = map
+
+ def lookup(self, name):
+ map = self.map
+ assert isinstance(map, Map)
+ map = jit.hint(map, promote_compatible=True)
+ return map.lookup_version(name)
+
+ m1 = Map()
+ m1.dct['a'] = 1
+ m1.dct['b'] = 2
+ m2 = Map()
+ m2.dct['a'] = 1
+ m2.dct['b'] = 2
+ m2.dct['c'] = 5
+
+ p1 = m1.instantiate()
+ p2 = m2.instantiate()
+
+ driver = jit.JitDriver(greens = [], reds = ['n', 'res', 'p'])
+
+ def f(n, p):
+ res = 0
+ while n > 0:
+ driver.jit_merge_point(n=n, p=p, res=res)
+ res += p.lookup('a')
+ res += p.lookup('c')
+ res += p.lookup('b')
+ n -= 1
+ return res
+
+ def main(x):
+ res = f(100, p1)
+ res = f(100, p2)
+ main(True)
+ main(False)
+
+ self.meta_interp(main, [True], backendopt=True)
+ self.check_trace_count(2)
+ self.check_resops(call_i=0)
From pypy.commits at gmail.com Mon Aug 8 12:48:08 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 08 Aug 2016 09:48:08 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: fix fake jit_debug ops
Message-ID: <57a8b7c8.53b81c0a.e11d7.57e9@mx.google.com>
Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r86081:12edeb760312
Date: 2016-07-06 17:49 +0200
http://bitbucket.org/pypy/pypy/changeset/12edeb760312/
Log: fix fake jit_debug ops
diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py
--- a/rpython/jit/metainterp/compatible.py
+++ b/rpython/jit/metainterp/compatible.py
@@ -183,6 +183,8 @@
def repr_of_conditions_as_jit_debug(self, argrepr="?"):
conditions = [cond.repr(argrepr) for cond in self.conditions]
+ # slow but who cares
+ conditions = "\n".join(conditions).split("\n")
# make fake jit-debug ops to print
for i in range(len(conditions)):
conditions[i] = "jit_debug('%s')" % (conditions[i], )
From pypy.commits at gmail.com Mon Aug 8 12:48:10 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 08 Aug 2016 09:48:10 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: make using objects as keys in
dicts/in sets not read their types
Message-ID: <57a8b7ca.411d1c0a.81e64.6185@mx.google.com>
Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r86082:71a4e8e68f14
Date: 2016-07-07 18:33 +0200
http://bitbucket.org/pypy/pypy/changeset/71a4e8e68f14/
Log: make using objects as keys in dicts/in sets not read their types
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1766,6 +1766,11 @@
_warnings.warn(msg, warningcls, stacklevel=stacklevel)
""")
+ def compares_by_identity(self, w_obj):
+ """ returns True if the object compares by identity (ie inherits __eq__
+ and __hash__ from object) """
+ return self.type(w_obj).compares_by_identity()
+
class AppExecCache(SpaceCache):
def build(cache, source):
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -592,16 +592,16 @@
return self.erase(None)
def switch_to_correct_strategy(self, w_dict, w_key):
+ from pypy.objspace.std.intobject import W_IntObject
if type(w_key) is self.space.StringObjectCls:
self.switch_to_bytes_strategy(w_dict)
return
elif type(w_key) is self.space.UnicodeObjectCls:
self.switch_to_unicode_strategy(w_dict)
return
- w_type = self.space.type(w_key)
- if self.space.is_w(w_type, self.space.w_int):
+ elif type(w_key) is W_IntObject:
self.switch_to_int_strategy(w_dict)
- elif w_type.compares_by_identity():
+ if self.space.compares_by_identity(w_key):
self.switch_to_identity_strategy(w_dict)
else:
self.switch_to_object_strategy(w_dict)
diff --git a/pypy/objspace/std/identitydict.py b/pypy/objspace/std/identitydict.py
--- a/pypy/objspace/std/identitydict.py
+++ b/pypy/objspace/std/identitydict.py
@@ -70,8 +70,7 @@
return self.erase(d)
def is_correct_type(self, w_obj):
- w_type = self.space.type(w_obj)
- return w_type.compares_by_identity()
+ return self.space.compares_by_identity(w_obj)
def _never_equal_to(self, w_lookup_type):
return False
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -352,6 +352,9 @@
from pypy.objspace.std.typeobject import _issubtype
return _issubtype(self.terminator.w_cls, w_type)
+ @jit.elidable_compatible(quasi_immut_field_name_for_second_arg="version")
+ def _type_compares_by_identity(self, version):
+ return self.terminator.w_cls.compares_by_identity()
class Terminator(AbstractAttribute):
_immutable_fields_ = ['w_cls']
@@ -1172,3 +1175,10 @@
if version_tag is not None:
return map._type_issubtype(w_type)
return space.type(w_obj).issubtype(w_type)
+
+def mapdict_compares_by_identity(space, w_obj):
+ if we_are_jitted() and w_obj.user_overridden_class:
+ map = w_obj._get_mapdict_map_no_promote()
+ if map.version is not None:
+ return map._type_compares_by_identity()
+ return space.type(w_obj).compares_by_identity()
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -657,6 +657,10 @@
return True
return mapdict_type_isinstance(self, w_inst, w_type)
+ def compares_by_identity(self, w_obj):
+ from pypy.objspace.std.mapdict import mapdict_compares_by_identity
+ return mapdict_compares_by_identity(self, w_obj)
+
@specialize.memo()
def _get_interplevel_cls(self, w_type):
if not hasattr(self, "_interplevel_classes"):
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
--- a/pypy/objspace/std/setobject.py
+++ b/pypy/objspace/std/setobject.py
@@ -796,7 +796,7 @@
strategy = self.space.fromcache(BytesSetStrategy)
elif type(w_key) is W_UnicodeObject:
strategy = self.space.fromcache(UnicodeSetStrategy)
- elif self.space.type(w_key).compares_by_identity():
+ elif self.space.compares_by_identity(w_key):
strategy = self.space.fromcache(IdentitySetStrategy)
else:
strategy = self.space.fromcache(ObjectSetStrategy)
@@ -1399,8 +1399,7 @@
return {}
def is_correct_type(self, w_key):
- w_type = self.space.type(w_key)
- return w_type.compares_by_identity()
+ return self.space.compares_by_identity(w_key)
def may_contain_equal_elements(self, strategy):
#empty first, probably more likely
@@ -1645,7 +1644,7 @@
# check for compares by identity
for w_item in iterable_w:
- if not space.type(w_item).compares_by_identity():
+ if not space.compares_by_identity(w_item):
break
else:
w_set.strategy = space.fromcache(IdentitySetStrategy)
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -1241,7 +1241,7 @@
assert (Y < Y) is True
-class AppTestComparesByIdentity:
+class AppTestComparesByIdentity(jit.RandomWeAreJittedTestMixin):
def setup_class(cls):
if cls.runappdirect:
@@ -1251,6 +1251,10 @@
return space.wrap(w_cls.compares_by_identity())
cls.w_compares_by_identity = cls.space.wrap(interp2app(compares_by_identity))
+ def instance_compares_by_identity(space, w_obj):
+ return space.wrap(space.compares_by_identity(w_obj))
+ cls.w_instance_compares_by_identity = cls.space.wrap(interp2app(instance_compares_by_identity))
+
def test_compares_by_identity(self):
class Plain(object):
pass
@@ -1282,6 +1286,15 @@
assert self.compares_by_identity(TypeSubclass)
assert not self.compares_by_identity(TypeSubclassCustomCmp)
+ assert self.instance_compares_by_identity(Plain())
+ assert not self.instance_compares_by_identity(CustomEq())
+ assert not self.instance_compares_by_identity(CustomCmp())
+ assert not self.instance_compares_by_identity(CustomHash())
+ assert self.instance_compares_by_identity(Plain)
+ assert self.instance_compares_by_identity(TypeSubclass('a', (object, ), {}))
+ assert not self.instance_compares_by_identity(TypeSubclassCustomCmp('a', (object, ), {}))
+
+
def test_modify_class(self):
class X(object):
pass
From pypy.commits at gmail.com Mon Aug 8 12:48:12 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 08 Aug 2016 09:48:12 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: fix test_dictmultiobject.py
Message-ID: <57a8b7cc.ca11c30a.76936.9bda@mx.google.com>
Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r86083:f1fe34175fc1
Date: 2016-08-05 18:28 +0200
http://bitbucket.org/pypy/pypy/changeset/f1fe34175fc1/
Log: fix test_dictmultiobject.py
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -592,16 +592,15 @@
return self.erase(None)
def switch_to_correct_strategy(self, w_dict, w_key):
- from pypy.objspace.std.intobject import W_IntObject
if type(w_key) is self.space.StringObjectCls:
self.switch_to_bytes_strategy(w_dict)
return
elif type(w_key) is self.space.UnicodeObjectCls:
self.switch_to_unicode_strategy(w_dict)
return
- elif type(w_key) is W_IntObject:
+ elif type(w_key) is self.space.IntObjectCls:
self.switch_to_int_strategy(w_dict)
- if self.space.compares_by_identity(w_key):
+ elif self.space.compares_by_identity(w_key):
self.switch_to_identity_strategy(w_dict)
else:
self.switch_to_object_strategy(w_dict)
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -49,6 +49,7 @@
self.FrameClass = frame.build_frame(self)
self.StringObjectCls = W_BytesObject
self.UnicodeObjectCls = W_UnicodeObject
+ self.IntObjectCls = W_IntObject
# singletons
self.w_None = W_NoneObject.w_None
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -1108,11 +1108,15 @@
w_float = float
StringObjectCls = FakeString
UnicodeObjectCls = FakeUnicode
+ IntObjectCls = int
w_dict = W_DictObject
iter = iter
fixedview = list
listview = list
+ def compares_by_identity(self, w_obj):
+ return False # safe default
+
class Config:
class objspace:
class std:
From pypy.commits at gmail.com Mon Aug 8 12:48:14 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 08 Aug 2016 09:48:14 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: make _type_issubtype function
actually elidable
Message-ID: <57a8b7ce.81cb1c0a.a9579.a326@mx.google.com>
Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r86084:8bfdd10930a7
Date: 2016-08-08 18:47 +0200
http://bitbucket.org/pypy/pypy/changeset/8bfdd10930a7/
Log: make _type_issubtype function actually elidable
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -348,7 +348,7 @@
return w_res
@jit.elidable_compatible(quasi_immut_field_name_for_second_arg="version")
- def _type_issubtype(self, version, w_type):
+ def _type_issubtype(self, version, w_type, w_type_version):
from pypy.objspace.std.typeobject import _issubtype
return _issubtype(self.terminator.w_cls, w_type)
@@ -1173,7 +1173,7 @@
if map.version is not None:
version_tag = w_type.version_tag()
if version_tag is not None:
- return map._type_issubtype(w_type)
+ return map._type_issubtype(w_type, version_tag)
return space.type(w_obj).issubtype(w_type)
def mapdict_compares_by_identity(space, w_obj):
From pypy.commits at gmail.com Mon Aug 8 13:02:53 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Mon, 08 Aug 2016 10:02:53 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: remove setup_class in test_asyncio,
make better use of test_gil_issue
Message-ID: <57a8bb3d.151a1c0a.593ef.6364@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86085:9faaeb88a806
Date: 2016-08-08 19:02 +0200
http://bitbucket.org/pypy/pypy/changeset/9faaeb88a806/
Log: remove setup_class in test_asyncio, make better use of
test_gil_issue
diff --git a/pypy/module/_asyncio/test/test_asyncio.py b/pypy/module/_asyncio/test/test_asyncio.py
--- a/pypy/module/_asyncio/test/test_asyncio.py
+++ b/pypy/module/_asyncio/test/test_asyncio.py
@@ -2,16 +2,13 @@
spaceconfig = dict(usemodules=["select","_socket","thread","signal","struct","_multiprocessing","array","_posixsubprocess","fcntl","unicodedata"])
- def setup_class(cls):
- cls.space.appexec([], """():
+ def test_gil_issue(self):
+ # the problem occurred at await asyncio.open_connection after calling run_until_complete
+ """
import encodings.idna
import asyncio
async def f():
reader, writer = await asyncio.open_connection('example.com', 80)
loop = asyncio.get_event_loop()
- loop.run_until_complete(f())""")
-
- def test_gil_issue(self):
- #needed to execute setup_call in the first place
- assert 1==1
\ No newline at end of file
+ loop.run_until_complete(f())"""
From pypy.commits at gmail.com Mon Aug 8 13:08:50 2016
From: pypy.commits at gmail.com (mattip)
Date: Mon, 08 Aug 2016 10:08:50 -0700 (PDT)
Subject: [pypy-commit] pypy default: fix test,
realloc frees the input ptr if it is realloc() ed
Message-ID: <57a8bca2.d4e41c0a.d14b6.5566@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r86086:cffad389be1e
Date: 2016-08-08 20:07 +0300
http://bitbucket.org/pypy/pypy/changeset/cffad389be1e/
Log: fix test, realloc frees the input ptr if it is realloc() ed
diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py
--- a/pypy/module/cpyext/test/test_object.py
+++ b/pypy/module/cpyext/test/test_object.py
@@ -245,12 +245,11 @@
char *copy, *orig = PyObject_MALLOC(12);
memcpy(orig, "hello world", 12);
copy = PyObject_REALLOC(orig, 15);
+ /* realloc() takes care of freeing orig, if changed */
if (copy == NULL)
Py_RETURN_NONE;
ret = PyString_FromStringAndSize(copy, 12);
- if (copy != orig)
- PyObject_Free(copy);
- PyObject_Free(orig);
+ PyObject_Free(copy);
return ret;
""")])
x = module.realloctest()
From pypy.commits at gmail.com Mon Aug 8 13:27:49 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 08 Aug 2016 10:27:49 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: We get nonsense with
stacklets, but avoid crashing, at least
Message-ID: <57a8c115.c19d1c0a.83f5f.6786@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86087:e39187569474
Date: 2016-08-08 15:08 +0200
http://bitbucket.org/pypy/pypy/changeset/e39187569474/
Log: We get nonsense with stacklets, but avoid crashing, at least
diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -98,6 +98,9 @@
def leave_code(s):
if not we_are_translated():
+ # xxx this assertion may be false in the presence of
+ # stacklets, but let's assume we never run untranslated
+ # tests with stacklets and rvmprof
assert vmprof_tl_stack.getraw() == s
vmprof_tl_stack.setraw(s.c_next)
lltype.free(s, flavor='raw')
@@ -140,5 +143,9 @@
enter_code(unique_id) # ignore the return value
else:
s = vmprof_tl_stack.getraw()
- assert s.c_value == unique_id
- leave_code(s)
+ #assert s.c_value == unique_id and s.c_kind == VMPROF_CODE_TAG
+ #^^^ this is false in the presence of stacklets.
+ # we get random nonsense then; let's say it's ok for now
+ # and avoid crashing.
+ if s.c_value == unique_id and s.c_kind == VMPROF_CODE_TAG:
+ leave_code(s)
From pypy.commits at gmail.com Mon Aug 8 13:27:51 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 08 Aug 2016 10:27:51 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: JIT fix: make
'_vmprof_unique_id' an immutable field; I think
Message-ID: <57a8c117.c70a1c0a.ea799.6208@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86088:e09e83478469
Date: 2016-08-08 19:07 +0200
http://bitbucket.org/pypy/pypy/changeset/e09e83478469/
Log: JIT fix: make '_vmprof_unique_id' an immutable field; I think
decorated_function() reads it all the time, without this
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -46,6 +46,7 @@
def _cleanup_(self):
self.is_enabled = False
+ @jit.dont_look_inside
@specialize.argtype(1)
def register_code(self, code, full_name_func):
"""Register the code object. Call when a new code object is made.
@@ -87,6 +88,8 @@
if CodeClass in self._code_classes:
return
CodeClass._vmprof_unique_id = 0 # default value: "unknown"
+ immut = CodeClass.__dict__.get('_immutable_fields_', [])
+ CodeClass._immutable_fields_ = list(immut) + ['_vmprof_unique_id']
self._code_classes.add(CodeClass)
#
class WeakCodeObjectList(RWeakListMixin):
@@ -111,6 +114,7 @@
prev = self._gather_all_code_objs
self._gather_all_code_objs = gather_all_code_objs
+ @jit.dont_look_inside
def enable(self, fileno, interval):
"""Enable vmprof. Writes go to the given 'fileno'.
The sampling interval is given by 'interval' as a number of
@@ -131,6 +135,7 @@
raise VMProfError(os.strerror(rposix.get_saved_errno()))
self.is_enabled = True
+ @jit.dont_look_inside
def disable(self):
"""Disable vmprof.
Raises VMProfError if something goes wrong.
From pypy.commits at gmail.com Mon Aug 8 13:45:49 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 08 Aug 2016 10:45:49 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Update description of
dictproxyobject.py
Message-ID: <57a8c54d.68adc20a.25dd3.b582@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86089:d751064b79e4
Date: 2016-08-08 18:45 +0100
http://bitbucket.org/pypy/pypy/changeset/d751064b79e4/
Log: Update description of dictproxyobject.py
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -1,5 +1,8 @@
-# Read-only proxy for mappings. PyPy does not have a separate type for
-# type.__dict__, so PyDictProxy_New has to use a custom read-only mapping.
+"""
+Read-only proxy for mappings.
+
+Its main use is as the return type of cls.__dict__.
+"""
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import oefmt
@@ -18,7 +21,8 @@
space.isinstance_w(w_mapping, space.w_list) or
space.isinstance_w(w_mapping, space.w_tuple)):
raise oefmt(space.w_TypeError,
- "mappingproxy() argument must be a mapping, not %T", w_mapping)
+ "mappingproxy() argument must be a mapping, not %T",
+ w_mapping)
return W_DictProxyObject(w_mapping)
def descr_init(self, space, __args__):
From pypy.commits at gmail.com Mon Aug 8 13:49:08 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 08 Aug 2016 10:49:08 -0700 (PDT)
Subject: [pypy-commit] pypy mappingproxy: Close branch mappingproxy
Message-ID: <57a8c614.44ce1c0a.a84b7.6d36@mx.google.com>
Author: Ronan Lamy
Branch: mappingproxy
Changeset: r86090:61f6e395e2e4
Date: 2016-08-08 18:48 +0100
http://bitbucket.org/pypy/pypy/changeset/61f6e395e2e4/
Log: Close branch mappingproxy
From pypy.commits at gmail.com Mon Aug 8 13:49:26 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 08 Aug 2016 10:49:26 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Merged in mappingproxy (pull request #469)
Message-ID: <57a8c626.85c11c0a.6c2fc.7372@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r86091:25c8dc3d6d4c
Date: 2016-08-08 18:48 +0100
http://bitbucket.org/pypy/pypy/changeset/25c8dc3d6d4c/
Log: Merged in mappingproxy (pull request #469)
Fix the mappingproxy type to behave as in CPython and consolidate
its duplicate implementations in cpyext and objspace into a single
one. Note that app-level cls.__dict__ and C-level cls->tp_dict now
return different objects with the former being an opaque (at app-
level) wrapper around the latter.
diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py
--- a/pypy/module/cppyy/pythonify.py
+++ b/pypy/module/cppyy/pythonify.py
@@ -175,7 +175,7 @@
"__new__" : make_new(class_name),
}
pycppclass = metacpp(class_name, _drop_cycles(bases), d)
-
+
# cache result early so that the class methods can find the class itself
setattr(scope, final_class_name, pycppclass)
@@ -192,13 +192,10 @@
for dm_name in cppclass.get_datamember_names():
cppdm = cppclass.get_datamember(dm_name)
- # here, setattr() can not be used, because a data member can shadow one in
- # its base class, resulting in the __set__() of its base class being called
- # by setattr(); so, store directly on the dictionary
- pycppclass.__dict__[dm_name] = cppdm
+ setattr(pycppclass, dm_name, cppdm)
import cppyy
if cppyy._is_static(cppdm): # TODO: make this a method of cppdm
- metacpp.__dict__[dm_name] = cppdm
+ setattr(metacpp, dm_name, cppdm)
# the call to register will add back-end specific pythonizations and thus
# needs to run first, so that the generic pythonizations can use them
@@ -413,7 +410,7 @@
lib = cppyy._load_dictionary(name)
_loaded_dictionaries[name] = lib
return lib
-
+
def _init_pythonify():
# cppyy should not be loaded at the module level, as that will trigger a
# call to space.getbuiltinmodule(), which will cause cppyy to be loaded
diff --git a/pypy/module/cpyext/dictproxyobject.py b/pypy/module/cpyext/dictproxyobject.py
--- a/pypy/module/cpyext/dictproxyobject.py
+++ b/pypy/module/cpyext/dictproxyobject.py
@@ -1,67 +1,7 @@
-# Read-only proxy for mappings. PyPy does not have a separate type for
-# type.__dict__, so PyDictProxy_New has to use a custom read-only mapping.
-
-from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
-from pypy.interpreter.typedef import TypeDef, interp2app
+from pypy.objspace.std.dictproxyobject import W_DictProxyObject
from pypy.module.cpyext.api import cpython_api, build_type_checkers
from pypy.module.cpyext.pyobject import PyObject
-class W_DictProxyObject(W_Root):
- "Read-only proxy for mappings."
-
- def __init__(self, w_mapping):
- self.w_mapping = w_mapping
-
- def descr_len(self, space):
- return space.len(self.w_mapping)
-
- def descr_getitem(self, space, w_key):
- return space.getitem(self.w_mapping, w_key)
-
- def descr_contains(self, space, w_key):
- return space.contains(self.w_mapping, w_key)
-
- def descr_iter(self, space):
- return space.iter(self.w_mapping)
-
- def descr_str(self, space):
- return space.str(self.w_mapping)
-
- def descr_repr(self, space):
- return space.repr(self.w_mapping)
-
- @unwrap_spec(w_default=WrappedDefault(None))
- def get_w(self, space, w_key, w_default):
- return space.call_method(self.w_mapping, "get", w_key, w_default)
-
- def keys_w(self, space):
- return space.call_method(self.w_mapping, "keys")
-
- def values_w(self, space):
- return space.call_method(self.w_mapping, "values")
-
- def items_w(self, space):
- return space.call_method(self.w_mapping, "items")
-
- def copy_w(self, space):
- return space.call_method(self.w_mapping, "copy")
-
-W_DictProxyObject.typedef = TypeDef(
- 'mappingproxy',
- __len__=interp2app(W_DictProxyObject.descr_len),
- __getitem__=interp2app(W_DictProxyObject.descr_getitem),
- __contains__=interp2app(W_DictProxyObject.descr_contains),
- __iter__=interp2app(W_DictProxyObject.descr_iter),
- __str__=interp2app(W_DictProxyObject.descr_str),
- __repr__=interp2app(W_DictProxyObject.descr_repr),
- get=interp2app(W_DictProxyObject.get_w),
- keys=interp2app(W_DictProxyObject.keys_w),
- values=interp2app(W_DictProxyObject.values_w),
- items=interp2app(W_DictProxyObject.items_w),
- copy=interp2app(W_DictProxyObject.copy_w)
-)
-
PyDictProxy_Check, PyDictProxy_CheckExact = build_type_checkers(
"DictProxy", W_DictProxyObject)
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -277,9 +277,41 @@
args->ob_type->tp_dict, "copy");
Py_INCREF(method);
return method;
- ''')])
+ '''),
+ ("get_type_dict", "METH_O",
+ '''
+ PyObject* value = args->ob_type->tp_dict;
+ if (value == NULL) value = Py_None;
+ Py_INCREF(value);
+ return value;
+ '''),
+ ])
obj = foo.new()
assert module.read_tp_dict(obj) == foo.fooType.copy
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
+ class A(object):
+ pass
+ obj = A()
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
+ d = module.get_type_dict(1)
+ assert type(d) is dict
+ try:
+ d["_some_attribute"] = 1
+ except TypeError: # on PyPy, int.__dict__ is really immutable
+ pass
+ else:
+ assert int._some_attribute == 1
+ del d["_some_attribute"]
def test_custom_allocation(self):
foo = self.import_module("foo")
@@ -348,6 +380,21 @@
api.Py_DecRef(ref)
+ def test_type_dict(self, space, api):
+ w_class = space.appexec([], """():
+ class A(object):
+ pass
+ return A
+ """)
+ ref = make_ref(space, w_class)
+
+ py_type = rffi.cast(PyTypeObjectPtr, ref)
+ w_dict = from_ref(space, py_type.c_tp_dict)
+ w_name = space.newunicode(u'a')
+ space.setitem(w_dict, w_name, space.wrap(1))
+ assert space.int_w(space.getattr(w_class, w_name)) == 1
+ space.delitem(w_dict, w_name)
+
def test_multiple_inheritance(self, space, api):
w_class = space.appexec([], """():
class A(object):
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -192,7 +192,7 @@
py_methoddescr.c_d_method = w_obj.ml
def classmethoddescr_realize(space, obj):
- # XXX NOT TESTED When is this ever called?
+ # XXX NOT TESTED When is this ever called?
method = rffi.cast(lltype.Ptr(PyMethodDef), obj)
w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
w_obj = space.allocate_instance(W_PyCClassMethodObject, w_type)
@@ -201,7 +201,7 @@
return w_obj
def methoddescr_realize(space, obj):
- # XXX NOT TESTED When is this ever called?
+ # XXX NOT TESTED When is this ever called?
method = rffi.cast(lltype.Ptr(PyMethodDef), obj)
w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
w_obj = space.allocate_instance(W_PyCMethodObject, w_type)
@@ -272,12 +272,12 @@
if len(slot_names) == 1:
if not getattr(pto, slot_names[0]):
setattr(pto, slot_names[0], slot_func_helper)
- elif (w_type.getname(space) in ('list', 'tuple') and
+ elif (w_type.getname(space) in ('list', 'tuple') and
slot_names[0] == 'c_tp_as_number'):
# XXX hack - hwo can we generalize this? The problem is method
# names like __mul__ map to more than one slot, and we have no
# convenient way to indicate which slots CPython have filled
- #
+ #
# We need at least this special case since Numpy checks that
# (list, tuple) do __not__ fill tp_as_number
pass
@@ -767,8 +767,8 @@
if w_obj.is_cpytype():
Py_DecRef(space, pto.c_tp_dict)
- w_dict = w_obj.getdict(space)
- pto.c_tp_dict = make_ref(space, w_dict)
+ w_dict = w_obj.getdict(space)
+ pto.c_tp_dict = make_ref(space, w_dict)
@cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL)
def PyType_IsSubtype(space, a, b):
diff --git a/pypy/objspace/std/classdict.py b/pypy/objspace/std/classdict.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/classdict.py
@@ -0,0 +1,119 @@
+from rpython.rlib import rerased
+from rpython.rlib.objectmodel import iteritems_with_hash
+
+from pypy.interpreter.error import OperationError, oefmt
+from pypy.objspace.std.dictmultiobject import (
+ DictStrategy, create_iterator_classes)
+from pypy.objspace.std.typeobject import unwrap_cell
+
+
+class ClassDictStrategy(DictStrategy):
+ """Exposes a W_TypeObject.dict_w at app-level.
+
+ Uses getdictvalue() and setdictvalue() to access items.
+ """
+ erase, unerase = rerased.new_erasing_pair("dictproxy")
+ erase = staticmethod(erase)
+ unerase = staticmethod(unerase)
+
+ def getitem(self, w_dict, w_key):
+ space = self.space
+ w_lookup_type = space.type(w_key)
+ if space.issubtype_w(w_lookup_type, space.w_unicode):
+ return self.getitem_str(w_dict, space.str_w(w_key))
+ else:
+ return None
+
+ def getitem_str(self, w_dict, key):
+ return self.unerase(w_dict.dstorage).getdictvalue(self.space, key)
+
+ def setitem(self, w_dict, w_key, w_value):
+ space = self.space
+ if space.is_w(space.type(w_key), space.w_unicode):
+ self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
+ else:
+ raise oefmt(space.w_TypeError,
+ "cannot add non-string keys to dict of a type")
+
+ def setitem_str(self, w_dict, key, w_value):
+ w_type = self.unerase(w_dict.dstorage)
+ try:
+ w_type.setdictvalue(self.space, key, w_value)
+ except OperationError as e:
+ if not e.match(self.space, self.space.w_TypeError):
+ raise
+ if not w_type.is_cpytype():
+ raise
+ # Allow cpyext to write to type->tp_dict even in the case
+ # of a builtin type.
+ # Like CPython, we assume that this is only done early
+ # after the type is created, and we don't invalidate any
+ # cache. User code shoud call PyType_Modified().
+ w_type.dict_w[key] = w_value
+
+ def setdefault(self, w_dict, w_key, w_default):
+ w_result = self.getitem(w_dict, w_key)
+ if w_result is not None:
+ return w_result
+ self.setitem(w_dict, w_key, w_default)
+ return w_default
+
+ def delitem(self, w_dict, w_key):
+ space = self.space
+ w_key_type = space.type(w_key)
+ if space.is_w(w_key_type, space.w_unicode):
+ key = self.space.str_w(w_key)
+ if not self.unerase(w_dict.dstorage).deldictvalue(space, key):
+ raise KeyError
+ else:
+ raise KeyError
+
+ def length(self, w_dict):
+ return len(self.unerase(w_dict.dstorage).dict_w)
+
+ def w_keys(self, w_dict):
+ space = self.space
+ w_type = self.unerase(w_dict.dstorage)
+ return space.newlist([_wrapkey(space, key)
+ for key in w_type.dict_w.iterkeys()])
+
+ def values(self, w_dict):
+ return [unwrap_cell(self.space, w_value) for w_value in
+ self.unerase(w_dict.dstorage).dict_w.itervalues()]
+
+ def items(self, w_dict):
+ space = self.space
+ w_type = self.unerase(w_dict.dstorage)
+ return [space.newtuple([_wrapkey(space, key),
+ unwrap_cell(space, w_value)])
+ for (key, w_value) in w_type.dict_w.iteritems()]
+
+ def clear(self, w_dict):
+ space = self.space
+ w_type = self.unerase(w_dict.dstorage)
+ if not w_type.is_heaptype():
+ raise oefmt(space.w_TypeError,
+ "can't clear dictionary of type '%N'", w_type)
+ w_type.dict_w.clear()
+ w_type.mutated(None)
+
+ def getiterkeys(self, w_dict):
+ return self.unerase(w_dict.dstorage).dict_w.iterkeys()
+
+ def getitervalues(self, w_dict):
+ return self.unerase(w_dict.dstorage).dict_w.itervalues()
+
+ def getiteritems_with_hash(self, w_dict):
+ return iteritems_with_hash(self.unerase(w_dict.dstorage).dict_w)
+
+ def wrapkey(space, key):
+ return _wrapkey(space, key)
+
+ def wrapvalue(space, value):
+ return unwrap_cell(space, value)
+
+def _wrapkey(space, key):
+ # keys are utf-8 encoded identifiers from type's dict_w
+ return space.wrap(key.decode('utf-8'))
+
+create_iterator_classes(ClassDictStrategy)
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -1,212 +1,95 @@
-from rpython.rlib import rerased
-from rpython.rlib.objectmodel import iteritems_with_hash
+"""
+Read-only proxy for mappings.
-from pypy.interpreter.error import OperationError, oefmt
-from pypy.interpreter.gateway import interp2app
-from pypy.interpreter.typedef import TypeDef
-from pypy.objspace.std.dictmultiobject import (
- DictStrategy, W_DictObject, create_iterator_classes)
-from pypy.objspace.std.typeobject import unwrap_cell
+Its main use is as the return type of cls.__dict__.
+"""
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.error import oefmt
+from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
+from pypy.interpreter.typedef import TypeDef, interp2app
-class W_DictProxyObject(W_DictObject):
+class W_DictProxyObject(W_Root):
+ "Read-only proxy for mappings."
+
+ def __init__(self, w_mapping):
+ self.w_mapping = w_mapping
+
@staticmethod
def descr_new(space, w_type, w_mapping):
if (not space.lookup(w_mapping, "__getitem__") or
- space.isinstance_w(w_mapping, space.w_list) or
- space.isinstance_w(w_mapping, space.w_tuple)):
+ space.isinstance_w(w_mapping, space.w_list) or
+ space.isinstance_w(w_mapping, space.w_tuple)):
raise oefmt(space.w_TypeError,
- "mappingproxy() argument must be a mapping, not %T", w_mapping)
- strategy = space.fromcache(MappingProxyStrategy)
- storage = strategy.erase(w_mapping)
- w_obj = space.allocate_instance(W_DictProxyObject, w_type)
- W_DictProxyObject.__init__(w_obj, space, strategy, storage)
- return w_obj
+ "mappingproxy() argument must be a mapping, not %T",
+ w_mapping)
+ return W_DictProxyObject(w_mapping)
def descr_init(self, space, __args__):
pass
+ def descr_len(self, space):
+ return space.len(self.w_mapping)
+
+ def descr_getitem(self, space, w_key):
+ return space.getitem(self.w_mapping, w_key)
+
+ def descr_contains(self, space, w_key):
+ return space.contains(self.w_mapping, w_key)
+
+ def descr_iter(self, space):
+ return space.iter(self.w_mapping)
+
+ def descr_str(self, space):
+ return space.str(self.w_mapping)
+
def descr_repr(self, space):
- return space.wrap(u"mappingproxy(%s)" % (
- space.unicode_w(W_DictObject.descr_repr(self, space))))
+ return space.newunicode(u"mappingproxy(%s)" %
+ (space.unicode_w(space.repr(self.w_mapping)),))
+
+ @unwrap_spec(w_default=WrappedDefault(None))
+ def get_w(self, space, w_key, w_default):
+ return space.call_method(self.w_mapping, "get", w_key, w_default)
+
+ def keys_w(self, space):
+ return space.call_method(self.w_mapping, "keys")
+
+ def values_w(self, space):
+ return space.call_method(self.w_mapping, "values")
+
+ def items_w(self, space):
+ return space.call_method(self.w_mapping, "items")
+
+ def copy_w(self, space):
+ return space.call_method(self.w_mapping, "copy")
+
+cmp_methods = {}
+def make_cmp_method(op):
+ def descr_op(self, space, w_other):
+ return getattr(space, op)(self.w_mapping, w_other)
+ descr_name = 'descr_' + op
+ descr_op.__name__ = descr_name
+ setattr(W_DictProxyObject, descr_name, descr_op)
+ cmp_methods['__%s__' % op] = interp2app(getattr(W_DictProxyObject, descr_name))
+
+for op in ['eq', 'ne', 'gt', 'ge', 'lt', 'le']:
+ make_cmp_method(op)
+
W_DictProxyObject.typedef = TypeDef(
- "mappingproxy", W_DictObject.typedef,
- __new__ = interp2app(W_DictProxyObject.descr_new),
- __init__ = interp2app(W_DictProxyObject.descr_init),
- __repr__ = interp2app(W_DictProxyObject.descr_repr),
+ 'mappingproxy',
+ __new__=interp2app(W_DictProxyObject.descr_new),
+ __init__=interp2app(W_DictProxyObject.descr_init),
+ __len__=interp2app(W_DictProxyObject.descr_len),
+ __getitem__=interp2app(W_DictProxyObject.descr_getitem),
+ __contains__=interp2app(W_DictProxyObject.descr_contains),
+ __iter__=interp2app(W_DictProxyObject.descr_iter),
+ __str__=interp2app(W_DictProxyObject.descr_str),
+ __repr__=interp2app(W_DictProxyObject.descr_repr),
+ get=interp2app(W_DictProxyObject.get_w),
+ keys=interp2app(W_DictProxyObject.keys_w),
+ values=interp2app(W_DictProxyObject.values_w),
+ items=interp2app(W_DictProxyObject.items_w),
+ copy=interp2app(W_DictProxyObject.copy_w),
+ **cmp_methods
)
-
-
-class DictProxyStrategy(DictStrategy):
- """Exposes a W_TypeObject.dict_w at app-level.
-
- Uses getdictvalue() and setdictvalue() to access items.
- """
- erase, unerase = rerased.new_erasing_pair("dictproxy")
- erase = staticmethod(erase)
- unerase = staticmethod(unerase)
-
- def getitem(self, w_dict, w_key):
- space = self.space
- w_lookup_type = space.type(w_key)
- if space.issubtype_w(w_lookup_type, space.w_unicode):
- return self.getitem_str(w_dict, space.str_w(w_key))
- else:
- return None
-
- def getitem_str(self, w_dict, key):
- return self.unerase(w_dict.dstorage).getdictvalue(self.space, key)
-
- def setitem(self, w_dict, w_key, w_value):
- space = self.space
- if space.is_w(space.type(w_key), space.w_unicode):
- self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
- else:
- raise oefmt(space.w_TypeError,
- "cannot add non-string keys to dict of a type")
-
- def setitem_str(self, w_dict, key, w_value):
- w_type = self.unerase(w_dict.dstorage)
- try:
- w_type.setdictvalue(self.space, key, w_value)
- except OperationError as e:
- if not e.match(self.space, self.space.w_TypeError):
- raise
- if not w_type.is_cpytype():
- raise
- # Allow cpyext to write to type->tp_dict even in the case
- # of a builtin type.
- # Like CPython, we assume that this is only done early
- # after the type is created, and we don't invalidate any
- # cache. User code shoud call PyType_Modified().
- w_type.dict_w[key] = w_value
-
- def setdefault(self, w_dict, w_key, w_default):
- w_result = self.getitem(w_dict, w_key)
- if w_result is not None:
- return w_result
- self.setitem(w_dict, w_key, w_default)
- return w_default
-
- def delitem(self, w_dict, w_key):
- space = self.space
- w_key_type = space.type(w_key)
- if space.is_w(w_key_type, space.w_unicode):
- key = self.space.str_w(w_key)
- if not self.unerase(w_dict.dstorage).deldictvalue(space, key):
- raise KeyError
- else:
- raise KeyError
-
- def length(self, w_dict):
- return len(self.unerase(w_dict.dstorage).dict_w)
-
- def w_keys(self, w_dict):
- space = self.space
- w_type = self.unerase(w_dict.dstorage)
- return space.newlist([_wrapkey(space, key)
- for key in w_type.dict_w.iterkeys()])
-
- def values(self, w_dict):
- return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()]
-
- def items(self, w_dict):
- space = self.space
- w_type = self.unerase(w_dict.dstorage)
- return [space.newtuple([_wrapkey(space, key),
- unwrap_cell(space, w_value)])
- for (key, w_value) in w_type.dict_w.iteritems()]
-
- def clear(self, w_dict):
- space = self.space
- w_type = self.unerase(w_dict.dstorage)
- if not w_type.is_heaptype():
- raise oefmt(space.w_TypeError,
- "can't clear dictionary of type '%N'", w_type)
- w_type.dict_w.clear()
- w_type.mutated(None)
-
- def getiterkeys(self, w_dict):
- return self.unerase(w_dict.dstorage).dict_w.iterkeys()
- def getitervalues(self, w_dict):
- return self.unerase(w_dict.dstorage).dict_w.itervalues()
- def getiteritems_with_hash(self, w_dict):
- return iteritems_with_hash(self.unerase(w_dict.dstorage).dict_w)
- def wrapkey(space, key):
- return _wrapkey(space, key)
- def wrapvalue(space, value):
- return unwrap_cell(space, value)
-
-def _wrapkey(space, key):
- # keys are utf-8 encoded identifiers from type's dict_w
- return space.wrap(key.decode('utf-8'))
-
-create_iterator_classes(DictProxyStrategy)
-
-
-class MappingProxyStrategy(DictStrategy):
- """Wraps an applevel mapping in a read-only dictionary."""
- erase, unerase = rerased.new_erasing_pair("mappingproxy")
- erase = staticmethod(erase)
- unerase = staticmethod(unerase)
-
- def getitem(self, w_dict, w_key):
- try:
- return self.space.getitem(self.unerase(w_dict.dstorage), w_key)
- except OperationError as e:
- if not e.match(self.space, self.space.w_KeyError):
- raise
- return None
-
- def setitem(self, w_dict, w_key, w_value):
- raise oefmt(self.space.w_TypeError,
- "'%T' object does not support item assignment", w_dict)
-
- def delitem(self, w_dict, w_key):
- raise oefmt(self.space.w_TypeError,
- "'%T' object does not support item deletion", w_dict)
-
- def length(self, w_dict):
- return self.space.len_w(self.unerase(w_dict.dstorage))
-
- def getiterkeys(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "keys"))
-
- def getitervalues(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "values"))
-
- def getiteritems_with_hash(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "items"))
-
- @staticmethod
- def override_next_key(iterkeys):
- w_keys = iterkeys.iterator
- return iterkeys.space.next(w_keys)
-
- @staticmethod
- def override_next_value(itervalues):
- w_values = itervalues.iterator
- return itervalues.space.next(w_values)
-
- @staticmethod
- def override_next_item(iteritems):
- w_items = iteritems.iterator
- w_item = iteritems.space.next(w_items)
- w_key, w_value = iteritems.space.unpackiterable(w_item, 2)
- return w_key, w_value
-
- def clear(self, w_dict):
- raise oefmt(self.space.w_AttributeError, "clear")
-
- def copy(self, w_dict):
- return self.space.call_method(self.unerase(w_dict.dstorage), "copy")
-
-create_iterator_classes(
- MappingProxyStrategy,
- override_next_key=MappingProxyStrategy.override_next_key,
- override_next_value=MappingProxyStrategy.override_next_value,
- override_next_item=MappingProxyStrategy.override_next_item)
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -9,42 +9,20 @@
assert 'a' in NotEmpty.__dict__
assert 'a' in NotEmpty.__dict__.keys()
assert 'b' not in NotEmpty.__dict__
- NotEmpty.__dict__['b'] = 4
- assert NotEmpty.b == 4
- del NotEmpty.__dict__['b']
assert NotEmpty.__dict__.get("b") is None
+ raises(TypeError, "NotEmpty.__dict__['b'] = 4")
raises(TypeError, 'NotEmpty.__dict__[15] = "y"')
- raises(KeyError, 'del NotEmpty.__dict__[15]')
+ raises(TypeError, 'del NotEmpty.__dict__[15]')
- assert NotEmpty.__dict__.setdefault("string", 1) == 1
- assert NotEmpty.__dict__.setdefault("string", 2) == 1
- assert NotEmpty.string == 1
- raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)')
-
- def test_dictproxy_popitem(self):
- class A(object):
- a = 42
- seen = 0
- try:
- while True:
- key, value = A.__dict__.popitem()
- if key == 'a':
- assert value == 42
- seen += 1
- except KeyError:
- pass
- assert seen == 1
+ raises(AttributeError, 'NotEmpty.__dict__.setdefault')
def test_dictproxy_getitem(self):
class NotEmpty(object):
a = 1
assert 'a' in NotEmpty.__dict__
- class substr(str): pass
+ class substr(str):
+ pass
assert substr('a') in NotEmpty.__dict__
- # the following are only for py2
- ## assert u'a' in NotEmpty.__dict__
- ## assert NotEmpty.__dict__[u'a'] == 1
- ## assert u'\xe9' not in NotEmpty.__dict__
def test_dictproxyeq(self):
class a(object):
@@ -63,9 +41,9 @@
class a(object):
pass
s1 = repr(a.__dict__)
+ assert s1.startswith('mappingproxy({') and s1.endswith('})')
s2 = str(a.__dict__)
- assert s1 == s2
- assert s1.startswith('mappingproxy({') and s1.endswith('})')
+ assert s1 == 'mappingproxy(%s)' % s2
def test_immutable_dict_on_builtin_type(self):
raises(TypeError, "int.__dict__['a'] = 1")
@@ -100,4 +78,3 @@
class AppTestUserObjectMethodCache(AppTestUserObject):
spaceconfig = {"objspace.std.withmethodcachecounter": True}
-
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -970,7 +970,6 @@
raises(TypeError, setattr, list, 'foobar', 42)
raises(TypeError, delattr, dict, 'keys')
raises(TypeError, 'int.__dict__["a"] = 1')
- raises(TypeError, 'int.__dict__.clear()')
def test_nontype_in_mro(self):
class OldStyle:
@@ -1028,10 +1027,9 @@
pass
a = A()
+ d = A.__dict__
A.x = 1
- assert A.__dict__["x"] == 1
- A.__dict__['x'] = 5
- assert A.x == 5
+ assert d["x"] == 1
def test_we_already_got_one_1(self):
# Issue #2079: highly obscure: CPython complains if we say
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -4,8 +4,8 @@
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import (
Function, StaticMethod, ClassMethod, FunctionWithFixedCode)
-from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\
- descr_get_dict, dict_descr, Member, TypeDef
+from pypy.interpreter.typedef import (
+ weakref_descr, GetSetProperty, dict_descr, Member, TypeDef)
from pypy.interpreter.astcompiler.misc import mangle
from pypy.module.__builtin__ import abstractinst
@@ -344,7 +344,7 @@
def deldictvalue(self, space, key):
if self.lazyloaders:
self._cleanup_() # force un-lazification
- if not self.is_heaptype():
+ if not (self.is_heaptype() or self.is_cpytype()):
raise oefmt(space.w_TypeError,
"can't delete attributes on type object '%N'", self)
try:
@@ -483,14 +483,14 @@
self.getdictvalue(self.space, attr)
del self.lazyloaders
- def getdict(self, space): # returning a dict-proxy!
- from pypy.objspace.std.dictproxyobject import DictProxyStrategy
- from pypy.objspace.std.dictproxyobject import W_DictProxyObject
+ def getdict(self, space):
+ from pypy.objspace.std.classdict import ClassDictStrategy
+ from pypy.objspace.std.dictmultiobject import W_DictObject
if self.lazyloaders:
self._cleanup_() # force un-lazification
- strategy = space.fromcache(DictProxyStrategy)
+ strategy = space.fromcache(ClassDictStrategy)
storage = strategy.erase(self)
- return W_DictProxyObject(space, strategy, storage)
+ return W_DictObject(space, strategy, storage)
def is_heaptype(self):
return self.flag_heaptype
@@ -929,6 +929,13 @@
return space.newbool(
abstractinst.p_recursive_isinstance_type_w(space, w_inst, w_obj))
+def type_get_dict(space, w_cls):
+ from pypy.objspace.std.dictproxyobject import W_DictProxyObject
+ w_dict = w_cls.getdict(space)
+ if w_dict is None:
+ return space.w_None
+ return W_DictProxyObject(w_dict)
+
W_TypeObject.typedef = TypeDef("type",
__new__ = gateway.interp2app(descr__new__),
__name__ = GetSetProperty(descr_get__name__, descr_set__name__),
@@ -936,7 +943,7 @@
__bases__ = GetSetProperty(descr_get__bases__, descr_set__bases__),
__base__ = GetSetProperty(descr__base),
__mro__ = GetSetProperty(descr_get__mro__),
- __dict__ = GetSetProperty(descr_get_dict),
+ __dict__=GetSetProperty(type_get_dict),
__doc__ = GetSetProperty(descr__doc, descr_set__doc),
__dir__ = gateway.interp2app(descr__dir),
mro = gateway.interp2app(descr_mro),
From pypy.commits at gmail.com Mon Aug 8 13:56:21 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 08 Aug 2016 10:56:21 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: Tentative proper fix for
stacklet+vmprof
Message-ID: <57a8c7c5.68adc20a.25dd3.b8a8@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86092:68cb0f468f13
Date: 2016-08-08 19:55 +0200
http://bitbucket.org/pypy/pypy/changeset/68cb0f468f13/
Log: Tentative proper fix for stacklet+vmprof
diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py
--- a/rpython/rlib/rstacklet.py
+++ b/rpython/rlib/rstacklet.py
@@ -3,6 +3,7 @@
from rpython.rlib import jit
from rpython.rlib.objectmodel import fetch_translated_config
from rpython.rtyper.lltypesystem import lltype, llmemory
+from rpython.rlib.rvmprof import cintf
DEBUG = False
@@ -24,7 +25,12 @@
def new(self, callback, arg=llmemory.NULL):
if DEBUG:
callback = _debug_wrapper(callback)
- h = self._gcrootfinder.new(self, callback, arg)
+ x = cintf.save_rvmprof_stack()
+ try:
+ cintf.empty_rvmprof_stack()
+ h = self._gcrootfinder.new(self, callback, arg)
+ finally:
+ cintf.restore_rvmprof_stack(x)
if DEBUG:
debug.add(h)
return h
@@ -34,7 +40,11 @@
def switch(self, stacklet):
if DEBUG:
debug.remove(stacklet)
- h = self._gcrootfinder.switch(stacklet)
+ x = cintf.save_rvmprof_stack()
+ try:
+ h = self._gcrootfinder.switch(stacklet)
+ finally:
+ cintf.restore_rvmprof_stack(x)
if DEBUG:
debug.add(h)
return h
diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -98,9 +98,6 @@
def leave_code(s):
if not we_are_translated():
- # xxx this assertion may be false in the presence of
- # stacklets, but let's assume we never run untranslated
- # tests with stacklets and rvmprof
assert vmprof_tl_stack.getraw() == s
vmprof_tl_stack.setraw(s.c_next)
lltype.free(s, flavor='raw')
@@ -143,9 +140,17 @@
enter_code(unique_id) # ignore the return value
else:
s = vmprof_tl_stack.getraw()
- #assert s.c_value == unique_id and s.c_kind == VMPROF_CODE_TAG
- #^^^ this is false in the presence of stacklets.
- # we get random nonsense then; let's say it's ok for now
- # and avoid crashing.
- if s.c_value == unique_id and s.c_kind == VMPROF_CODE_TAG:
- leave_code(s)
+ assert s.c_value == unique_id and s.c_kind == VMPROF_CODE_TAG
+ leave_code(s)
+
+#
+# stacklet support
+
+def save_rvmprof_stack():
+ return vmprof_tl_stack.get_or_make_raw()
+
+def empty_rvmprof_stack():
+ vmprof_tl_stack.setraw(lltype.nullptr(VMPROFSTACK))
+
+def restore_rvmprof_stack(x):
+ vmprof_tl_stack.setraw(x)
From pypy.commits at gmail.com Tue Aug 9 03:42:54 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 00:42:54 -0700 (PDT)
Subject: [pypy-commit] pypy improve-vmprof-testing: close branch,
ready to merge
Message-ID: <57a9897e.c62f1c0a.88ea8.92f3@mx.google.com>
Author: Armin Rigo
Branch: improve-vmprof-testing
Changeset: r86093:36f80e5d1a22
Date: 2016-08-09 09:39 +0200
http://bitbucket.org/pypy/pypy/changeset/36f80e5d1a22/
Log: close branch, ready to merge
From pypy.commits at gmail.com Tue Aug 9 03:42:56 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 00:42:56 -0700 (PDT)
Subject: [pypy-commit] pypy default: hg merge improve-vmprof-testing
Message-ID: <57a98980.469d1c0a.f359e.9445@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86094:7b02cc6e8bfd
Date: 2016-08-09 09:42 +0200
http://bitbucket.org/pypy/pypy/changeset/7b02cc6e8bfd/
Log: hg merge improve-vmprof-testing
Improved vmprof support: now tries hard to not miss any Python-level
frame in the captured stacks, even if there is the metainterp or
blackhole interp involved.
Also fix the stacklet (greenlet) support.
Contains some sanity checks that will fail an assertion, even when
running without vmprof enabled.
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -7,64 +7,152 @@
from rpython.jit.backend.x86.arch import WORD
from rpython.jit.codewriter.policy import JitPolicy
+
class BaseRVMProfTest(object):
- def test_one(self):
- py.test.skip("needs thread-locals in the JIT, which is only available "
- "after translation")
+
+ def setup_method(self, meth):
visited = []
def helper():
+ trace = []
stack = cintf.vmprof_tl_stack.getraw()
- print stack
- if stack:
- # not during tracing
- visited.append(stack.c_value)
- else:
- visited.append(0)
+ while stack:
+ trace.append((stack.c_kind, stack.c_value))
+ stack = stack.c_next
+ visited.append(trace)
llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
- driver = jit.JitDriver(greens=['code'], reds='auto')
+ class CodeObj(object):
+ def __init__(self, name):
+ self.name = name
- class CodeObj(object):
- pass
-
- def get_code_fn(code, arg):
+ def get_code_fn(codes, code, arg, c):
return code
def get_name(code):
return "foo"
+ _get_vmprof().use_weaklist = False
register_code_object_class(CodeObj, get_name)
- @vmprof_execute_code("main", get_code_fn)
- def f(code, n):
+ self.misc = visited, llfn, CodeObj, get_code_fn, get_name
+
+
+ def teardown_method(self, meth):
+ del _get_vmprof().use_weaklist
+
+
+ def test_simple(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
i = 0
while i < n:
- driver.jit_merge_point(code=code)
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ c = f(codes, codes[1], 1, c)
+ else:
+ llfn()
+ c -= 1
i += 1
- llfn()
+ return c
def main(n):
- cintf.vmprof_tl_stack.setraw(null) # make it empty
- vmprof = _get_vmprof()
- code = CodeObj()
- register_code(code, get_name)
- return f(code, n)
-
- class Hooks(jit.JitHookInterface):
- def after_compile(self, debug_info):
- self.raw_start = debug_info.asminfo.rawstart
-
- hooks = Hooks()
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ return f(codes, codes[0], n, 8)
null = lltype.nullptr(cintf.VMPROFSTACK)
- self.meta_interp(main, [10], policy=JitPolicy(hooks))
- print visited
- #v = set(visited)
- #assert 0 in v
- #v.remove(0)
- #assert len(v) == 1
- #assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
- #assert cintf.vmprof_tl_stack.getraw() == null
- # ^^^ make sure we didn't leave anything dangling
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
+ def test_leaving_with_exception(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ class MyExc(Exception):
+ def __init__(self, c):
+ self.c = c
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while i < n:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ try:
+ f(codes, codes[1], 1, c)
+ except MyExc as e:
+ c = e.c
+ else:
+ llfn()
+ c -= 1
+ i += 1
+ raise MyExc(c)
+
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ try:
+ f(codes, codes[0], n, 8)
+ except MyExc as e:
+ return e.c
+
+ null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
+ def test_leaving_with_exception_in_blackhole(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ class MyExc(Exception):
+ def __init__(self, c):
+ self.c = c
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while True:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if i >= n:
+ break
+ i += 1
+ if code.name == "main":
+ try:
+ f(codes, codes[1], 1, c)
+ except MyExc as e:
+ c = e.c
+ driver.can_enter_jit(code=code, c=c, i=i, codes=codes, n=n)
+ else:
+ llfn()
+ c -= 1
+ if c & 1: # a failing guard
+ pass
+ raise MyExc(c)
+
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ try:
+ f(codes, codes[0], n, 8)
+ except MyExc as e:
+ return e.c
+
+ null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -452,6 +452,8 @@
prepare = self._handle_math_sqrt_call
elif oopspec_name.startswith('rgc.'):
prepare = self._handle_rgc_call
+ elif oopspec_name.startswith('rvmprof.'):
+ prepare = self._handle_rvmprof_call
elif oopspec_name.endswith('dict.lookup'):
# also ordereddict.lookup
prepare = self._handle_dict_lookup_call
@@ -2079,6 +2081,32 @@
else:
raise NotImplementedError(oopspec_name)
+ def _handle_rvmprof_call(self, op, oopspec_name, args):
+ if oopspec_name != 'rvmprof.jitted':
+ raise NotImplementedError(oopspec_name)
+ c_entering = Constant(0, lltype.Signed)
+ c_leaving = Constant(1, lltype.Signed)
+ v_uniqueid = args[0]
+ op1 = SpaceOperation('rvmprof_code', [c_entering, v_uniqueid], None)
+ op2 = SpaceOperation('rvmprof_code', [c_leaving, v_uniqueid], None)
+ #
+ # fish fish inside the oopspec's graph for the ll_func pointer
+ block = op.args[0].value._obj.graph.startblock
+ while True:
+ assert len(block.exits) == 1
+ nextblock = block.exits[0].target
+ if nextblock.operations == ():
+ break
+ block = nextblock
+ last_op = block.operations[-1]
+ assert last_op.opname == 'direct_call'
+ c_ll_func = last_op.args[0]
+ #
+ args = [c_ll_func] + op.args[2:]
+ ops = self.rewrite_op_direct_call(SpaceOperation('direct_call',
+ args, op.result))
+ return [op1] + ops + [op2]
+
def rewrite_op_ll_read_timestamp(self, op):
op1 = self.prepare_builtin_call(op, "ll_read_timestamp", [])
return self.handle_residual_call(op1,
diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py
--- a/rpython/jit/codewriter/test/test_flatten.py
+++ b/rpython/jit/codewriter/test/test_flatten.py
@@ -14,7 +14,7 @@
from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_longlong, r_ulonglong
from rpython.rlib.jit import dont_look_inside, _we_are_jitted, JitDriver
from rpython.rlib.objectmodel import keepalive_until_here
-from rpython.rlib import jit
+from rpython.rlib import jit, debug
class FakeRegAlloc:
@@ -140,7 +140,6 @@
def encoding_test(self, func, args, expected,
transform=False, liveness=False, cc=None, jd=None):
-
graphs = self.make_graphs(func, args)
#graphs[0].show()
if transform:
@@ -1112,6 +1111,31 @@
assert str(e.value).startswith("A virtualizable array is passed aroun")
assert "" in str(e.value)
+ def test_rvmprof_code(self):
+ from rpython.rlib.rvmprof import cintf
+ class MyFakeCallControl(FakeCallControl):
+ def guess_call_kind(self, op):
+ if 'jitted' in repr(op):
+ return 'builtin'
+ return 'residual'
+ class X:
+ pass
+ def g(x, y):
+ debug.debug_print("foo")
+ return X()
+ @jit.oopspec("rvmprof.jitted(unique_id)")
+ def decorated_jitted_function(unique_id, *args):
+ return g(*args)
+ def f(id, x, y):
+ return decorated_jitted_function(id, x, y)
+ self.encoding_test(f, [42, 56, 74], """
+ rvmprof_code $0, %i0
+ residual_call_ir_r $<* fn g>, I[%i1, %i2], R[], -> %r0
+ -live-
+ rvmprof_code $1, %i0
+ ref_return %r0
+ """, transform=True, cc=MyFakeCallControl())
+
def check_force_cast(FROM, TO, operations, value):
"""Check that the test is correctly written..."""
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -64,6 +64,7 @@
assert self._insns[value] is None
self._insns[value] = key
self.op_catch_exception = insns.get('catch_exception/L', -1)
+ self.op_rvmprof_code = insns.get('rvmprof_code/ii', -1)
#
all_funcs = []
for key in self._insns:
@@ -270,6 +271,7 @@
self.dispatch_loop = builder.dispatch_loop
self.descrs = builder.descrs
self.op_catch_exception = builder.op_catch_exception
+ self.op_rvmprof_code = builder.op_rvmprof_code
self.count_interpreter = count_interpreter
#
if we_are_translated():
@@ -373,9 +375,32 @@
target = ord(code[position+1]) | (ord(code[position+2])<<8)
self.position = target
return
+ if opcode == self.op_rvmprof_code:
+ # call the 'jit_rvmprof_code(1)' for rvmprof, but then
+ # continue popping frames. Decode the 'rvmprof_code' insn
+ # manually here.
+ from rpython.rlib.rvmprof import cintf
+ arg1 = self.registers_i[ord(code[position + 1])]
+ arg2 = self.registers_i[ord(code[position + 2])]
+ assert arg1 == 1
+ cintf.jit_rvmprof_code(arg1, arg2)
# no 'catch_exception' insn follows: just reraise
reraise(e)
+ def handle_rvmprof_enter(self):
+ code = self.jitcode.code
+ position = self.position
+ opcode = ord(code[position])
+ if opcode == self.op_rvmprof_code:
+ arg1 = self.registers_i[ord(code[position + 1])]
+ arg2 = self.registers_i[ord(code[position + 2])]
+ if arg1 == 1:
+ # we are resuming at a position that will do a
+ # jit_rvmprof_code(1), when really executed. That's a
+ # hint for the need for a jit_rvmprof_code(0).
+ from rpython.rlib.rvmprof import cintf
+ cintf.jit_rvmprof_code(0, arg2)
+
def copy_constants(self, registers, constants):
"""Copy jitcode.constants[0] to registers[255],
jitcode.constants[1] to registers[254],
@@ -1501,6 +1526,11 @@
def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length):
cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length)
+ @arguments("i", "i")
+ def bhimpl_rvmprof_code(leaving, unique_id):
+ from rpython.rlib.rvmprof import cintf
+ cintf.jit_rvmprof_code(leaving, unique_id)
+
# ----------
# helpers to resume running in blackhole mode when a guard failed
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -1453,6 +1453,25 @@
metainterp.history.record(rop.VIRTUAL_REF_FINISH,
[vrefbox, nullbox], None)
+ @arguments("int", "box")
+ def opimpl_rvmprof_code(self, leaving, box_unique_id):
+ from rpython.rlib.rvmprof import cintf
+ cintf.jit_rvmprof_code(leaving, box_unique_id.getint())
+
+ def handle_rvmprof_enter_on_resume(self):
+ code = self.bytecode
+ position = self.pc
+ opcode = ord(code[position])
+ if opcode == self.metainterp.staticdata.op_rvmprof_code:
+ arg1 = self.registers_i[ord(code[position + 1])].getint()
+ arg2 = self.registers_i[ord(code[position + 2])].getint()
+ if arg1 == 1:
+ # we are resuming at a position that will do a
+ # jit_rvmprof_code(1), when really executed. That's a
+ # hint for the need for a jit_rvmprof_code(0).
+ from rpython.rlib.rvmprof import cintf
+ cintf.jit_rvmprof_code(0, arg2)
+
# ------------------------------
def setup_call(self, argboxes):
@@ -1804,6 +1823,7 @@
opimpl = _get_opimpl_method(name, argcodes)
self.opcode_implementations[value] = opimpl
self.op_catch_exception = insns.get('catch_exception/L', -1)
+ self.op_rvmprof_code = insns.get('rvmprof_code/ii', -1)
def setup_descrs(self, descrs):
self.opcode_descrs = descrs
@@ -2071,6 +2091,15 @@
target = ord(code[position+1]) | (ord(code[position+2])<<8)
frame.pc = target
raise ChangeFrame
+ if opcode == self.staticdata.op_rvmprof_code:
+ # call the 'jit_rvmprof_code(1)' for rvmprof, but then
+ # continue popping frames. Decode the 'rvmprof_code' insn
+ # manually here.
+ from rpython.rlib.rvmprof import cintf
+ arg1 = frame.registers_i[ord(code[position + 1])].getint()
+ arg2 = frame.registers_i[ord(code[position + 2])].getint()
+ assert arg1 == 1
+ cintf.jit_rvmprof_code(arg1, arg2)
self.popframe()
try:
self.compile_exit_frame_with_exception(self.last_exc_box)
diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py
--- a/rpython/jit/metainterp/resume.py
+++ b/rpython/jit/metainterp/resume.py
@@ -1058,6 +1058,7 @@
f.setup_resume_at_op(pc)
resumereader.consume_boxes(f.get_current_position_info(),
f.registers_i, f.registers_r, f.registers_f)
+ f.handle_rvmprof_enter_on_resume()
return resumereader.liveboxes, virtualizable_boxes, virtualref_boxes
@@ -1343,6 +1344,7 @@
jitcode = jitcodes[jitcode_pos]
curbh.setposition(jitcode, pc)
resumereader.consume_one_section(curbh)
+ curbh.handle_rvmprof_enter()
return curbh
def force_from_resumedata(metainterp_sd, storage, deadframe, vinfo, ginfo):
diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py
--- a/rpython/rlib/rstacklet.py
+++ b/rpython/rlib/rstacklet.py
@@ -3,6 +3,7 @@
from rpython.rlib import jit
from rpython.rlib.objectmodel import fetch_translated_config
from rpython.rtyper.lltypesystem import lltype, llmemory
+from rpython.rlib.rvmprof import cintf
DEBUG = False
@@ -24,7 +25,12 @@
def new(self, callback, arg=llmemory.NULL):
if DEBUG:
callback = _debug_wrapper(callback)
- h = self._gcrootfinder.new(self, callback, arg)
+ x = cintf.save_rvmprof_stack()
+ try:
+ cintf.empty_rvmprof_stack()
+ h = self._gcrootfinder.new(self, callback, arg)
+ finally:
+ cintf.restore_rvmprof_stack(x)
if DEBUG:
debug.add(h)
return h
@@ -34,7 +40,11 @@
def switch(self, stacklet):
if DEBUG:
debug.remove(stacklet)
- h = self._gcrootfinder.switch(stacklet)
+ x = cintf.save_rvmprof_stack()
+ try:
+ h = self._gcrootfinder.switch(stacklet)
+ finally:
+ cintf.restore_rvmprof_stack(x)
if DEBUG:
debug.add(h)
return h
diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -6,7 +6,8 @@
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform as platform
-from rpython.rlib import rthread
+from rpython.rlib import rthread, jit
+from rpython.rlib.objectmodel import we_are_translated
class VMProfPlatformUnsupported(Exception):
pass
@@ -96,5 +97,60 @@
return s
def leave_code(s):
+ if not we_are_translated():
+ assert vmprof_tl_stack.getraw() == s
vmprof_tl_stack.setraw(s.c_next)
lltype.free(s, flavor='raw')
+
+#
+# JIT notes:
+#
+# - When running JIT-generated assembler code, we have different custom
+# code to build the VMPROFSTACK, so the functions above are not used.
+# (It uses kind == VMPROF_JITTED_TAG and the VMPROFSTACK is allocated
+# in the C stack.)
+#
+# - The jitcode for decorated_jitted_function() in rvmprof.py is
+# special-cased by jtransform.py to produce this:
+#
+# rvmprof_code(0, unique_id)
+# res = inline_call FUNC <- for func(*args)
+# rvmprof_code(1, unique_id)
+# return res
+#
+# There is no 'catch_exception', but the second 'rvmprof_code' is
+# meant to be executed even in case there was an exception. This is
+# done by a special case in pyjitpl.py and blackhole.py. The point
+# is that the above simple pattern can be detected by the blackhole
+# interp, when it first rebuilds all the intermediate RPython
+# frames; at that point it needs to call jit_rvmprof_code(0) on all
+# intermediate RPython frames, so it does pattern matching to
+# recognize when it must call that and with which 'unique_id' value.
+#
+# - The jitcode opcode 'rvmprof_code' doesn't produce any resop. When
+# meta-interpreting, it causes pyjitpl to call jit_rvmprof_code().
+# As mentioned above, there is logic to call jit_rvmprof_code(1)
+# even if we exit with an exception, even though there is no
+# 'catch_exception'. There is similar logic inside the blackhole
+# interpreter.
+
+
+def jit_rvmprof_code(leaving, unique_id):
+ if leaving == 0:
+ enter_code(unique_id) # ignore the return value
+ else:
+ s = vmprof_tl_stack.getraw()
+ assert s.c_value == unique_id and s.c_kind == VMPROF_CODE_TAG
+ leave_code(s)
+
+#
+# stacklet support
+
+def save_rvmprof_stack():
+ return vmprof_tl_stack.get_or_make_raw()
+
+def empty_rvmprof_stack():
+ vmprof_tl_stack.setraw(lltype.nullptr(VMPROFSTACK))
+
+def restore_rvmprof_stack(x):
+ vmprof_tl_stack.setraw(x)
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -4,7 +4,7 @@
from rpython.rlib.rvmprof import cintf
from rpython.rtyper.annlowlevel import cast_instance_to_gcref
from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
-from rpython.rtyper.lltypesystem import rffi, llmemory
+from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.rweaklist import RWeakListMixin
@@ -25,10 +25,16 @@
def __str__(self):
return self.msg
+class FakeWeakCodeObjectList(object):
+ def add_handle(self, handle):
+ pass
+
class VMProf(object):
_immutable_fields_ = ['is_enabled?']
+ use_weaklist = True # False for tests
+
def __init__(self):
"NOT_RPYTHON: use _get_vmprof()"
self._code_classes = set()
@@ -40,6 +46,7 @@
def _cleanup_(self):
self.is_enabled = False
+ @jit.dont_look_inside
@specialize.argtype(1)
def register_code(self, code, full_name_func):
"""Register the code object. Call when a new code object is made.
@@ -56,7 +63,7 @@
self._code_unique_id = uid
if self.is_enabled:
self._write_code_registration(uid, full_name_func(code))
- else:
+ elif self.use_weaklist:
code._vmprof_weak_list.add_handle(code)
def register_code_object_class(self, CodeClass, full_name_func):
@@ -81,12 +88,17 @@
if CodeClass in self._code_classes:
return
CodeClass._vmprof_unique_id = 0 # default value: "unknown"
+ immut = CodeClass.__dict__.get('_immutable_fields_', [])
+ CodeClass._immutable_fields_ = list(immut) + ['_vmprof_unique_id']
self._code_classes.add(CodeClass)
#
class WeakCodeObjectList(RWeakListMixin):
def __init__(self):
self.initialize()
- CodeClass._vmprof_weak_list = WeakCodeObjectList()
+ if self.use_weaklist:
+ CodeClass._vmprof_weak_list = WeakCodeObjectList()
+ else:
+ CodeClass._vmprof_weak_list = FakeWeakCodeObjectList()
#
def gather_all_code_objs():
all_code_wrefs = CodeClass._vmprof_weak_list.get_all_handles()
@@ -102,6 +114,7 @@
prev = self._gather_all_code_objs
self._gather_all_code_objs = gather_all_code_objs
+ @jit.dont_look_inside
def enable(self, fileno, interval):
"""Enable vmprof. Writes go to the given 'fileno'.
The sampling interval is given by 'interval' as a number of
@@ -122,6 +135,7 @@
raise VMProfError(os.strerror(rposix.get_saved_errno()))
self.is_enabled = True
+ @jit.dont_look_inside
def disable(self):
"""Disable vmprof.
Raises VMProfError if something goes wrong.
@@ -140,7 +154,8 @@
if self.cintf.vmprof_register_virtual_function(name, uid, 500000) < 0:
raise VMProfError("vmprof buffers full! disk full or too slow")
-def vmprof_execute_code(name, get_code_fn, result_class=None):
+def vmprof_execute_code(name, get_code_fn, result_class=None,
+ _hack_update_stack_untranslated=False):
"""Decorator to be used on the function that interprets a code object.
'name' must be a unique name.
@@ -150,24 +165,40 @@
'result_class' is ignored (backward compatibility).
"""
+ if _hack_update_stack_untranslated:
+ from rpython.rtyper.annlowlevel import llhelper
+ enter_code = llhelper(lltype.Ptr(
+ lltype.FuncType([lltype.Signed], cintf.PVMPROFSTACK)),
+ cintf.enter_code)
+ leave_code = llhelper(lltype.Ptr(
+ lltype.FuncType([cintf.PVMPROFSTACK], lltype.Void)),
+ cintf.leave_code)
+ else:
+ enter_code = cintf.enter_code
+ leave_code = cintf.leave_code
+
def decorate(func):
try:
_get_vmprof()
except cintf.VMProfPlatformUnsupported:
return func
+ @jit.oopspec("rvmprof.jitted(unique_id)")
+ def decorated_jitted_function(unique_id, *args):
+ return func(*args)
+
def decorated_function(*args):
- # If we are being JITted, we want to skip the trampoline, else the
- # JIT cannot see through it.
+ unique_id = get_code_fn(*args)._vmprof_unique_id
+ unique_id = rffi.cast(lltype.Signed, unique_id)
+ # ^^^ removes the "known non-negative" hint for annotation
if not jit.we_are_jitted():
- unique_id = get_code_fn(*args)._vmprof_unique_id
- x = cintf.enter_code(unique_id)
+ x = enter_code(unique_id)
try:
return func(*args)
finally:
- cintf.leave_code(x)
+ leave_code(x)
else:
- return func(*args)
+ return decorated_jitted_function(unique_id, *args)
decorated_function.__name__ = func.__name__ + '_rvmprof'
return decorated_function
From pypy.commits at gmail.com Tue Aug 9 03:53:44 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 00:53:44 -0700 (PDT)
Subject: [pypy-commit] pypy inline-blocks: Preserve indentation. Allows us
to read the diff more easily
Message-ID: <57a98c08.c62f1c0a.88ea8.9705@mx.google.com>
Author: Armin Rigo
Branch: inline-blocks
Changeset: r86095:294e881d0a90
Date: 2016-08-09 09:53 +0200
http://bitbucket.org/pypy/pypy/changeset/294e881d0a90/
Log: Preserve indentation. Allows us to read the diff more easily
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py
--- a/rpython/translator/c/funcgen.py
+++ b/rpython/translator/c/funcgen.py
@@ -193,75 +193,76 @@
yield line
def gen_block(self, block):
- myblocknum = self.blocknum[block]
- if block in self.inlinable_blocks:
- # debug comment
- yield '/* block%d: (inlined) */' % myblocknum
- else:
- yield 'block%d:' % myblocknum
- if block in self.innerloops:
- for line in self.gen_while_loop_hack(block):
- yield line
- return
- for i, op in enumerate(block.operations):
- for line in self.gen_op(op):
- yield line
- if len(block.exits) == 0:
- assert len(block.inputargs) == 1
- # regular return block
- retval = self.expr(block.inputargs[0])
- if self.exception_policy != "exc_helper":
- yield 'RPY_DEBUG_RETURN();'
- yield 'return %s;' % retval
- return
- elif block.exitswitch is None:
- # single-exit block
- assert len(block.exits) == 1
- for op in self.gen_link(block.exits[0]):
- yield op
- else:
- assert not block.canraise
- # block ending in a switch on a value
- TYPE = self.lltypemap(block.exitswitch)
- if TYPE == Bool:
- expr = self.expr(block.exitswitch)
- for link in block.exits[:0:-1]:
+ if 1: # (preserve indentation)
+ myblocknum = self.blocknum[block]
+ if block in self.inlinable_blocks:
+ # debug comment
+ yield '/* block%d: (inlined) */' % myblocknum
+ else:
+ yield 'block%d:' % myblocknum
+ if block in self.innerloops:
+ for line in self.gen_while_loop_hack(block):
+ yield line
+ return
+ for i, op in enumerate(block.operations):
+ for line in self.gen_op(op):
+ yield line
+ if len(block.exits) == 0:
+ assert len(block.inputargs) == 1
+ # regular return block
+ retval = self.expr(block.inputargs[0])
+ if self.exception_policy != "exc_helper":
+ yield 'RPY_DEBUG_RETURN();'
+ yield 'return %s;' % retval
+ return
+ elif block.exitswitch is None:
+ # single-exit block
+ assert len(block.exits) == 1
+ for op in self.gen_link(block.exits[0]):
+ yield op
+ else:
+ assert not block.canraise
+ # block ending in a switch on a value
+ TYPE = self.lltypemap(block.exitswitch)
+ if TYPE == Bool:
+ expr = self.expr(block.exitswitch)
+ for link in block.exits[:0:-1]:
+ assert link.exitcase in (False, True)
+ if not link.exitcase:
+ expr = '!' + expr
+ yield 'if (%s) {' % expr
+ for op in self.gen_link(link):
+ yield '\t' + op
+ yield '}'
+ link = block.exits[0]
assert link.exitcase in (False, True)
- if not link.exitcase:
- expr = '!' + expr
- yield 'if (%s) {' % expr
for op in self.gen_link(link):
- yield '\t' + op
+ yield op
+ elif TYPE in (Signed, Unsigned, SignedLongLong,
+ UnsignedLongLong, Char, UniChar):
+ defaultlink = None
+ expr = self.expr(block.exitswitch)
+ yield 'switch (%s) {' % self.expr(block.exitswitch)
+ for link in block.exits:
+ if link.exitcase == 'default':
+ defaultlink = link
+ continue
+ yield 'case %s:' % self.db.get(link.llexitcase)
+ for op in self.gen_link(link):
+ yield '\t' + op
+ # 'break;' not needed, as gen_link ends in a 'goto'
+ # Emit default case
+ yield 'default:'
+ if defaultlink is None:
+ yield '\tassert(!"bad switch!!"); abort();'
+ else:
+ for op in self.gen_link(defaultlink):
+ yield '\t' + op
+
yield '}'
- link = block.exits[0]
- assert link.exitcase in (False, True)
- for op in self.gen_link(link):
- yield op
- elif TYPE in (Signed, Unsigned, SignedLongLong,
- UnsignedLongLong, Char, UniChar):
- defaultlink = None
- expr = self.expr(block.exitswitch)
- yield 'switch (%s) {' % self.expr(block.exitswitch)
- for link in block.exits:
- if link.exitcase == 'default':
- defaultlink = link
- continue
- yield 'case %s:' % self.db.get(link.llexitcase)
- for op in self.gen_link(link):
- yield '\t' + op
- # 'break;' not needed, as gen_link ends in a 'goto'
- # Emit default case
- yield 'default:'
- if defaultlink is None:
- yield '\tassert(!"bad switch!!"); abort();'
else:
- for op in self.gen_link(defaultlink):
- yield '\t' + op
-
- yield '}'
- else:
- raise TypeError("exitswitch type not supported"
- " Got %r" % (TYPE,))
+ raise TypeError("exitswitch type not supported"
+ " Got %r" % (TYPE,))
def gen_link(self, link):
"Generate the code to jump across the given Link."
From pypy.commits at gmail.com Tue Aug 9 04:23:19 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 09 Aug 2016 01:23:19 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: another test testing unpack
operation (mostly integer)
Message-ID: <57a992f7.28eac20a.8f95a.8def@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86096:5d676fb9307f
Date: 2016-08-09 10:22 +0200
http://bitbucket.org/pypy/pypy/changeset/5d676fb9307f/
Log: another test testing unpack operation (mostly integer)
diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py
--- a/rpython/jit/metainterp/test/test_vector.py
+++ b/rpython/jit/metainterp/test/test_vector.py
@@ -754,17 +754,36 @@
vars['x'] = v
packs = '\n '.join(pack)
resvar = suffix + '{'+suffix+'}'
+
+ # format the resoperations, take care that the lhs of =
+ # is formatted later with a new variable name
+ unpackops = unpack
+ if isinstance(unpack, str):
+ unpackops = [unpack]
+ unpacksf = []
+ for up in unpackops:
+ lhs, rhs = up.split("=")
+ rhsf = rhs.format(**vars)
+ newvar('i'); newvar('f'); newvar('v')
+ lhsf = lhs.format(**vars)
+ unpacksf.append(lhsf + '=' + rhsf)
+ unpacks = '\n '.join(unpacksf)
+
source = '''
[{args}]
label({args}, descr=targettoken)
{packs}
- {unpack}
+ {unpacks}
finish({resvar}, descr=finaldescr)
- '''.format(args=','.join(args),packs=packs, unpack=unpack.format(**vars),
+ '''.format(args=','.join(args),packs=packs, unpacks=unpacks,
resvar=resvar.format(**vars))
- loop = parse(source, namespace={'targettoken': targettoken,
- 'finaldescr': finaldescr})
+ print(source)
+ return self._compile_and_run(source, args_values, float,
+ ns={'targettoken': targettoken, 'finaldescr': finaldescr})
+
+ def _compile_and_run(self, source, args_values, float=True, ns={}):
+ loop = parse(source, namespace=ns)
cpu = self.CPUClass(rtyper=None, stats=None)
cpu.setup_once()
#
@@ -792,11 +811,37 @@
## integer unpack (byte)
for i in range(16):
op = "i{i} = vec_unpack_i({x}, %d, 1)" % i
- assert self.run_unpack(op, "[16xi8]", {'x': [127,1]*8}, float=False) == (127 if i%2==0 else 1)
+ assert self.run_unpack(op, "[16xi8]", {'x': [127,1]*8}, float=False) == \
+ (127 if i%2==0 else 1)
if i < 8:
- assert self.run_unpack(op, "[2xi16]", {'x': [2**15-1,0]*4}, float=False) == (2**15-1 if i%2==0 else 0)
+ assert self.run_unpack(op, "[8xi16]", {'x': [2**15-1,0]*4}, float=False) == \
+ (2**15-1 if i%2==0 else 0)
if i < 4:
- assert self.run_unpack(op, "[2xi32]", {'x': [2**31-1,0]*4}, float=False) == (2**31-1 if i%2==0 else 0)
+ assert self.run_unpack(op, "[4xi32]", {'x': [2**31-1,0]*4}, float=False) == \
+ (2**31-1 if i%2==0 else 0)
+
+ def test_unpack_several(self):
+ # count == 2
+ values = [1,2,3,4]
+ for i,v in enumerate(values):
+ j = (i // 2) * 2
+ op = ["v{v}[2xi32] = vec_unpack_i({x}, %d, 2)" % j,
+ "i{i} = vec_unpack_i(v{v}[2xi32], %d, 1)" % i]
+ assert self.run_unpack(op, "[4xi32]", {'x': values}, float=False) == v
+
+ values = [1,2,3,4,5,6,7,8]
+ for i,v in enumerate(values):
+ j = (i // 4) * 4
+ op = ["v{v}[4xi16] = vec_unpack_i({x}, %d, 4)" % j,
+ "i{i} = vec_unpack_i(v{v}[4xi16], %d, 1)" % i]
+ assert self.run_unpack(op, "[8xi16]", {'x': values}, float=False) == v
+
+ values = [1,2,3,4,5,6,7,8] * 2
+ for i,v in enumerate(values):
+ j = (i // 8) * 8
+ op = ["v{v}[8xi8] = vec_unpack_i({x}, %d, 8)" % j,
+ "i{i} = vec_unpack_i(v{v}[8xi8], %d, 1)" % i]
+ assert self.run_unpack(op, "[16xi8]", {'x': values}, float=False) == v
class TestLLtype(LLJitMixin, VectorizeTests):
From pypy.commits at gmail.com Tue Aug 9 04:32:13 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 01:32:13 -0700 (PDT)
Subject: [pypy-commit] pypy default: Factor out some of the timeout logic.
Should be a no-op.
Message-ID: <57a9950d.a710c20a.3fa53.97fa@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86097:c6334dab174d
Date: 2016-08-09 10:31 +0200
http://bitbucket.org/pypy/pypy/changeset/c6334dab174d/
Log: Factor out some of the timeout logic. Should be a no-op.
diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py
--- a/rpython/rlib/rsocket.py
+++ b/rpython/rlib/rsocket.py
@@ -846,22 +846,27 @@
if res < 0:
raise self.error_handler()
+ def wait_for_data(self, for_writing):
+ timeout = self._select(for_writing)
+ if timeout != 0:
+ if timeout == 1:
+ raise SocketTimeout
+ else:
+ raise self.error_handler()
+
def recv(self, buffersize, flags=0):
"""Receive up to buffersize bytes from the socket. For the optional
flags argument, see the Unix manual. When no data is available, block
until at least one byte is available or until the remote end is closed.
When the remote end is closed and all data is read, return the empty
string."""
- timeout = self._select(False)
- if timeout == 1:
- raise SocketTimeout
- elif timeout == 0:
- with rffi.scoped_alloc_buffer(buffersize) as buf:
- read_bytes = _c.socketrecv(self.fd,
- rffi.cast(rffi.VOIDP, buf.raw),
- buffersize, flags)
- if read_bytes >= 0:
- return buf.str(read_bytes)
+ self.wait_for_data(False)
+ with rffi.scoped_alloc_buffer(buffersize) as buf:
+ read_bytes = _c.socketrecv(self.fd,
+ rffi.cast(rffi.VOIDP, buf.raw),
+ buffersize, flags)
+ if read_bytes >= 0:
+ return buf.str(read_bytes)
raise self.error_handler()
def recvinto(self, rwbuffer, nbytes, flags=0):
@@ -874,26 +879,23 @@
"""Like recv(buffersize, flags) but also return the sender's
address."""
read_bytes = -1
- timeout = self._select(False)
- if timeout == 1:
- raise SocketTimeout
- elif timeout == 0:
- with rffi.scoped_alloc_buffer(buffersize) as buf:
- address, addr_p, addrlen_p = self._addrbuf()
- try:
- read_bytes = _c.recvfrom(self.fd, buf.raw, buffersize, flags,
- addr_p, addrlen_p)
- addrlen = rffi.cast(lltype.Signed, addrlen_p[0])
- finally:
- lltype.free(addrlen_p, flavor='raw')
- address.unlock()
- if read_bytes >= 0:
- if addrlen:
- address.addrlen = addrlen
- else:
- address = None
- data = buf.str(read_bytes)
- return (data, address)
+ self.wait_for_data(False)
+ with rffi.scoped_alloc_buffer(buffersize) as buf:
+ address, addr_p, addrlen_p = self._addrbuf()
+ try:
+ read_bytes = _c.recvfrom(self.fd, buf.raw, buffersize, flags,
+ addr_p, addrlen_p)
+ addrlen = rffi.cast(lltype.Signed, addrlen_p[0])
+ finally:
+ lltype.free(addrlen_p, flavor='raw')
+ address.unlock()
+ if read_bytes >= 0:
+ if addrlen:
+ address.addrlen = addrlen
+ else:
+ address = None
+ data = buf.str(read_bytes)
+ return (data, address)
raise self.error_handler()
def recvfrom_into(self, rwbuffer, nbytes, flags=0):
@@ -903,12 +905,8 @@
def send_raw(self, dataptr, length, flags=0):
"""Send data from a CCHARP buffer."""
- res = -1
- timeout = self._select(True)
- if timeout == 1:
- raise SocketTimeout
- elif timeout == 0:
- res = _c.send(self.fd, dataptr, length, flags)
+ self.wait_for_data(True)
+ res = _c.send(self.fd, dataptr, length, flags)
if res < 0:
raise self.error_handler()
return res
@@ -942,15 +940,11 @@
def sendto(self, data, flags, address):
"""Like send(data, flags) but allows specifying the destination
address. (Note that 'flags' is mandatory here.)"""
- res = -1
- timeout = self._select(True)
- if timeout == 1:
- raise SocketTimeout
- elif timeout == 0:
- addr = address.lock()
- res = _c.sendto(self.fd, data, len(data), flags,
- addr, address.addrlen)
- address.unlock()
+ self.wait_for_data(True)
+ addr = address.lock()
+ res = _c.sendto(self.fd, data, len(data), flags,
+ addr, address.addrlen)
+ address.unlock()
if res < 0:
raise self.error_handler()
return res
From pypy.commits at gmail.com Tue Aug 9 04:52:19 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 01:52:19 -0700 (PDT)
Subject: [pypy-commit] pypy default: Fix socket.recvfrom() so that it takes
advantage of the fact that,
Message-ID: <57a999c3.a111c20a.a2129.8dba@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86098:e53ea5c9c384
Date: 2016-08-09 10:51 +0200
http://bitbucket.org/pypy/pypy/changeset/e53ea5c9c384/
Log: Fix socket.recvfrom() so that it takes advantage of the fact that,
nowadays, a lot of buffers have a get_raw_address()
diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py
--- a/rpython/rlib/rsocket.py
+++ b/rpython/rlib/rsocket.py
@@ -862,23 +862,30 @@
string."""
self.wait_for_data(False)
with rffi.scoped_alloc_buffer(buffersize) as buf:
- read_bytes = _c.socketrecv(self.fd,
- rffi.cast(rffi.VOIDP, buf.raw),
- buffersize, flags)
+ read_bytes = _c.socketrecv(self.fd, buf.raw, buffersize, flags)
if read_bytes >= 0:
return buf.str(read_bytes)
raise self.error_handler()
def recvinto(self, rwbuffer, nbytes, flags=0):
- buf = self.recv(nbytes, flags)
- rwbuffer.setslice(0, buf)
- return len(buf)
+ try:
+ rwbuffer.get_raw_address()
+ except ValueError:
+ buf = self.recv(nbytes, flags)
+ rwbuffer.setslice(0, buf)
+ return len(buf)
+ else:
+ self.wait_for_data(False)
+ raw = rwbuffer.get_raw_address()
+ read_bytes = _c.socketrecv(self.fd, raw, nbytes, flags)
+ if read_bytes >= 0:
+ return read_bytes
+ raise self.error_handler()
@jit.dont_look_inside
def recvfrom(self, buffersize, flags=0):
"""Like recv(buffersize, flags) but also return the sender's
address."""
- read_bytes = -1
self.wait_for_data(False)
with rffi.scoped_alloc_buffer(buffersize) as buf:
address, addr_p, addrlen_p = self._addrbuf()
@@ -899,9 +906,30 @@
raise self.error_handler()
def recvfrom_into(self, rwbuffer, nbytes, flags=0):
- buf, addr = self.recvfrom(nbytes, flags)
- rwbuffer.setslice(0, buf)
- return len(buf), addr
+ try:
+ rwbuffer.get_raw_address()
+ except ValueError:
+ buf, addr = self.recvfrom(nbytes, flags)
+ rwbuffer.setslice(0, buf)
+ return len(buf), addr
+ else:
+ self.wait_for_data(False)
+ address, addr_p, addrlen_p = self._addrbuf()
+ try:
+ raw = rwbuffer.get_raw_address()
+ read_bytes = _c.recvfrom(self.fd, raw, nbytes, flags,
+ addr_p, addrlen_p)
+ addrlen = rffi.cast(lltype.Signed, addrlen_p[0])
+ finally:
+ lltype.free(addrlen_p, flavor='raw')
+ address.unlock()
+ if read_bytes >= 0:
+ if addrlen:
+ address.addrlen = addrlen
+ else:
+ address = None
+ return (read_bytes, address)
+ raise self.error_handler()
def send_raw(self, dataptr, length, flags=0):
"""Send data from a CCHARP buffer."""
diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py
--- a/rpython/rlib/test/test_rsocket.py
+++ b/rpython/rlib/test/test_rsocket.py
@@ -119,25 +119,111 @@
s1.close()
s2.close()
-def test_socketpair_recvinto():
+def test_socketpair_recvinto_1():
class Buffer:
def setslice(self, start, string):
self.x = string
- def as_str(self):
- return self.x
+ def get_raw_address(self):
+ raise ValueError
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
- s2.recvinto(buf, 1)
- assert buf.as_str() == '?'
+ n = s2.recvinto(buf, 1)
+ assert n == 1
+ assert buf.x == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
- s1.recvinto(buf, 100)
- assert buf.as_str() == 'x'*count
+ n = s1.recvinto(buf, 100)
+ assert n == count
+ assert buf.x == 'x'*count
+ s1.close()
+ s2.close()
+
+def test_socketpair_recvinto_2():
+ class Buffer:
+ def __init__(self):
+ self._p = lltype.malloc(rffi.CCHARP.TO, 100, flavor='raw',
+ track_allocation=False)
+
+ def _as_str(self, count):
+ return rffi.charpsize2str(self._p, count)
+
+ def get_raw_address(self):
+ return self._p
+
+ if sys.platform == "win32":
+ py.test.skip('No socketpair on Windows')
+ s1, s2 = socketpair()
+ buf = Buffer()
+ s1.sendall('?')
+ n = s2.recvinto(buf, 1)
+ assert n == 1
+ assert buf._as_str(1) == '?'
+ count = s2.send('x'*99)
+ assert 1 <= count <= 99
+ n = s1.recvinto(buf, 100)
+ assert n == count
+ assert buf._as_str(n) == 'x'*count
+ s1.close()
+ s2.close()
+
+def test_socketpair_recvfrom_into_1():
+ class Buffer:
+ def setslice(self, start, string):
+ self.x = string
+
+ def get_raw_address(self):
+ raise ValueError
+
+ if sys.platform == "win32":
+ py.test.skip('No socketpair on Windows')
+ s1, s2 = socketpair()
+ buf = Buffer()
+ s1.sendall('?')
+ n, addr = s2.recvfrom_into(buf, 1)
+ assert n == 1
+ assert addr is None
+ assert buf.x == '?'
+ count = s2.send('x'*99)
+ assert 1 <= count <= 99
+ n, addr = s1.recvfrom_into(buf, 100)
+ assert n == count
+ assert addr is None
+ assert buf.x == 'x'*count
+ s1.close()
+ s2.close()
+
+def test_socketpair_recvfrom_into_2():
+ class Buffer:
+ def __init__(self):
+ self._p = lltype.malloc(rffi.CCHARP.TO, 100, flavor='raw',
+ track_allocation=False)
+
+ def _as_str(self, count):
+ return rffi.charpsize2str(self._p, count)
+
+ def get_raw_address(self):
+ return self._p
+
+ if sys.platform == "win32":
+ py.test.skip('No socketpair on Windows')
+ s1, s2 = socketpair()
+ buf = Buffer()
+ s1.sendall('?')
+ n, addr = s2.recvfrom_into(buf, 1)
+ assert n == 1
+ assert addr is None
+ assert buf._as_str(1) == '?'
+ count = s2.send('x'*99)
+ assert 1 <= count <= 99
+ n, addr = s1.recvfrom_into(buf, 100)
+ assert n == count
+ assert addr is None
+ assert buf._as_str(n) == 'x'*count
s1.close()
s2.close()
From pypy.commits at gmail.com Tue Aug 9 05:15:44 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 02:15:44 -0700 (PDT)
Subject: [pypy-commit] pypy inline-blocks: Close branch inline-blocks
Message-ID: <57a99f40.a111c20a.a2129.97ec@mx.google.com>
Author: Armin Rigo
Branch: inline-blocks
Changeset: r86099:c5053a699285
Date: 2016-08-09 11:14 +0200
http://bitbucket.org/pypy/pypy/changeset/c5053a699285/
Log: Close branch inline-blocks
From pypy.commits at gmail.com Tue Aug 9 05:16:23 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 02:16:23 -0700 (PDT)
Subject: [pypy-commit] pypy default: Merged in inline-blocks (pull request
#467)
Message-ID: <57a99f67.8bc71c0a.7f7bb.b721@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86100:4c64ef74c612
Date: 2016-08-09 11:14 +0200
http://bitbucket.org/pypy/pypy/changeset/4c64ef74c612/
Log: Merged in inline-blocks (pull request #467)
Inline gotos to blocks with only one predecessor.
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py
--- a/rpython/translator/c/funcgen.py
+++ b/rpython/translator/c/funcgen.py
@@ -2,7 +2,7 @@
from rpython.translator.c.support import cdecl
from rpython.translator.c.support import llvalue_from_constant, gen_assignments
from rpython.translator.c.support import c_string_constant, barebonearray
-from rpython.flowspace.model import Variable, Constant
+from rpython.flowspace.model import Variable, Constant, mkentrymap
from rpython.rtyper.lltypesystem.lltype import (Ptr, Void, Bool, Signed, Unsigned,
SignedLongLong, Float, UnsignedLongLong, Char, UniChar, ContainerType,
Array, FixedSizeArray, ForwardReference, FuncType)
@@ -173,17 +173,37 @@
def cfunction_body(self):
graph = self.graph
- yield 'goto block0;' # to avoid a warning "this label is not used"
+ # yield 'goto block0;' # to avoid a warning "this label is not used"
- # generate the body of each block
+ # Locate blocks with a single predecessor, which can be written
+ # inline in place of a "goto":
+ entrymap = mkentrymap(graph)
+ self.inlinable_blocks = {
+ block for block in entrymap if len(entrymap[block]) == 1}
+
+ yield ''
+ for line in self.gen_goto(graph.startblock):
+ yield line
+
+ # Only blocks left are those that have more than one predecessor.
for block in graph.iterblocks():
+ if block in self.inlinable_blocks:
+ continue
+ for line in self.gen_block(block):
+ yield line
+
+ def gen_block(self, block):
+ if 1: # (preserve indentation)
myblocknum = self.blocknum[block]
- yield ''
- yield 'block%d:' % myblocknum
+ if block in self.inlinable_blocks:
+ # debug comment
+ yield '/* block%d: (inlined) */' % myblocknum
+ else:
+ yield 'block%d:' % myblocknum
if block in self.innerloops:
for line in self.gen_while_loop_hack(block):
yield line
- continue
+ return
for i, op in enumerate(block.operations):
for line in self.gen_op(op):
yield line
@@ -194,7 +214,7 @@
if self.exception_policy != "exc_helper":
yield 'RPY_DEBUG_RETURN();'
yield 'return %s;' % retval
- continue
+ return
elif block.exitswitch is None:
# single-exit block
assert len(block.exits) == 1
@@ -256,12 +276,25 @@
assignments.append((a2typename, dest, src))
for line in gen_assignments(assignments):
yield line
- label = 'block%d' % self.blocknum[link.target]
- if link.target in self.innerloops:
- loop = self.innerloops[link.target]
+ for line in self.gen_goto(link.target, link):
+ yield line
+
+ def gen_goto(self, target, link=None):
+ """Recursively expand block with inlining or goto.
+
+ Blocks that have only one predecessor are inlined directly, all others
+ are reached via goto.
+ """
+ label = 'block%d' % self.blocknum[target]
+ if target in self.innerloops:
+ loop = self.innerloops[target]
if link is loop.links[-1]: # link that ends a loop
label += '_back'
- yield 'goto %s;' % label
+ if target in self.inlinable_blocks:
+ for line in self.gen_block(target):
+ yield line
+ else:
+ yield 'goto %s;' % label
def gen_op(self, op):
macro = 'OP_%s' % op.opname.upper()
diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py
--- a/rpython/translator/c/test/test_genc.py
+++ b/rpython/translator/c/test/test_genc.py
@@ -1,4 +1,5 @@
import ctypes
+import re
from collections import OrderedDict
import py
@@ -13,6 +14,7 @@
from rpython.rtyper.lltypesystem.rstr import STR
from rpython.tool.nullpath import NullPyPathLocal
from rpython.translator.c import genc
+from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks
from rpython.translator.interactive import Translation
from rpython.translator.translator import TranslationContext, graphof
@@ -604,3 +606,23 @@
else:
assert 0, "the call was not found in the C source"
assert 'PYPY_INHIBIT_TAIL_CALL();' in lines[i+1]
+
+def get_generated_c_source(fn, types):
+ """Return the generated C source for fn."""
+ t = Translation(fn, types, backend="c")
+ t.annotate()
+ merge_if_blocks(t.driver.translator.graphs[0])
+ c_filename_path = t.source_c()
+ return t.driver.cbuilder.c_source_filename.join('..',
+ 'rpython_translator_c_test.c').read()
+
+def test_generated_c_source_no_gotos():
+ # We want simple functions to have no indirection/goto.
+ # Instead, PyPy can inline blocks when they aren't reused.
+
+ def main(x):
+ return x + 1
+
+ c_src = get_generated_c_source(main, [int])
+ assert 'goto' not in c_src
+ assert not re.search(r'block\w*:(?! \(inlined\))', c_src)
From pypy.commits at gmail.com Tue Aug 9 05:36:11 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 02:36:11 -0700 (PDT)
Subject: [pypy-commit] pypy default: Some missing keepalives (I think)
Message-ID: <57a9a40b.eeb8c20a.6ebe3.a8cb@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86101:f5b4cb2bb733
Date: 2016-08-09 11:35 +0200
http://bitbucket.org/pypy/pypy/changeset/f5b4cb2bb733/
Log: Some missing keepalives (I think)
diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py
--- a/rpython/rlib/rsocket.py
+++ b/rpython/rlib/rsocket.py
@@ -878,6 +878,7 @@
self.wait_for_data(False)
raw = rwbuffer.get_raw_address()
read_bytes = _c.socketrecv(self.fd, raw, nbytes, flags)
+ keepalive_until_here(rwbuffer)
if read_bytes >= 0:
return read_bytes
raise self.error_handler()
@@ -919,6 +920,7 @@
raw = rwbuffer.get_raw_address()
read_bytes = _c.recvfrom(self.fd, raw, nbytes, flags,
addr_p, addrlen_p)
+ keepalive_until_here(rwbuffer)
addrlen = rffi.cast(lltype.Signed, addrlen_p[0])
finally:
lltype.free(addrlen_p, flavor='raw')
From pypy.commits at gmail.com Tue Aug 9 05:46:14 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Tue, 09 Aug 2016 02:46:14 -0700 (PDT)
Subject: [pypy-commit] pypy default: remove outdated comment
Message-ID: <57a9a666.81cb1c0a.d1959.c2f6@mx.google.com>
Author: Carl Friedrich Bolz
Branch:
Changeset: r86102:6a3c7c1c12ef
Date: 2016-08-09 11:45 +0200
http://bitbucket.org/pypy/pypy/changeset/6a3c7c1c12ef/
Log: remove outdated comment
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py
--- a/rpython/translator/c/funcgen.py
+++ b/rpython/translator/c/funcgen.py
@@ -173,7 +173,6 @@
def cfunction_body(self):
graph = self.graph
- # yield 'goto block0;' # to avoid a warning "this label is not used"
# Locate blocks with a single predecessor, which can be written
# inline in place of a "goto":
From pypy.commits at gmail.com Tue Aug 9 09:59:02 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 09 Aug 2016 06:59:02 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async-translate: merged py3.5 changes
Message-ID: <57a9e1a6.4bc41c0a.52bee.2efd@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async-translate
Changeset: r86103:3db60dde36a5
Date: 2016-08-09 14:13 +0200
http://bitbucket.org/pypy/pypy/changeset/3db60dde36a5/
Log: merged py3.5 changes
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -18,7 +18,6 @@
from pypy.interpreter.nestedscope import Cell
from pypy.interpreter.pycode import PyCode, BytecodeCorruption
from pypy.tool.stdlib_opcode import bytecode_spec
-from pypy.objspace.std.dictmultiobject import W_DictMultiObject
CANNOT_CATCH_MSG = ("catching classes that don't inherit from BaseException "
"is not allowed in 3.x")
@@ -1391,9 +1390,8 @@
self.pushvalue(w_sum)
def BUILD_TUPLE_UNPACK(self, itemcount, next_instr):
- space = self.space
w_sum_list = list_unpack_helper(self, itemcount)
- self.pushvalue(space.newtuple(w_sum_list))
+ self.pushvalue(self.space.newtuple(w_sum_list))
def BUILD_LIST_UNPACK(self, itemcount, next_instr):
w_sum = list_unpack_helper(self, itemcount)
diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py
--- a/pypy/interpreter/test/test_interpreter.py
+++ b/pypy/interpreter/test/test_interpreter.py
@@ -256,7 +256,63 @@
return a, b, c, d
"""
assert self.codetest(code, "f", [1, 2], {"d" : 4, "c" : 3}) == (1, 2, 3, 4)
-
+
+ def test_build_set_unpack(self):
+ code = """ def f():
+ return {*range(4), 4, *(5, 6, 7)}
+ """
+ space = self.space
+ res = self.codetest(code, "f", [])
+ l_res = space.call_function(space.w_list, res)
+ assert space.unwrap(l_res) == [0, 1, 2, 3, 4, 5, 6, 7]
+
+ def test_build_tuple_unpack(self):
+ code = """ def f():
+ return (*range(4), 4)
+ """
+ assert self.codetest(code, "f", []) == (0, 1, 2, 3, 4)
+
+ def test_build_list_unpack(self):
+ code = """ def f():
+ return [*range(4), 4]
+ """
+ assert self.codetest(code, "f", []) == [0, 1, 2, 3, 4]
+
+ def test_build_map_unpack(self):
+ code = """
+ def f():
+ return {'x': 1, **{'y': 2}}
+ def g():
+ return {**()}
+ """
+ assert self.codetest(code, "f", []) == {'x': 1, 'y': 2}
+ res = self.codetest(code, 'g', [])
+ assert "TypeError:" in res
+ assert "'tuple' object is not a mapping" in res
+
+ def test_build_map_unpack_with_call(self):
+ code = """
+ def f(a,b,c,d):
+ return a+b,c+d
+ def g1():
+ return f(**{'a': 1, 'c': 3}, **{'b': 2, 'd': 4})
+ def g2():
+ return f(**{'a': 1, 'c': 3}, **[])
+ def g3():
+ return f(**{'a': 1, 'c': 3}, **{1: 3})
+ def g4():
+ return f(**{'a': 1, 'c': 3}, **{'a': 2})
+ """
+ assert self.codetest(code, "g1", []) == (3, 7)
+ resg2 = self.codetest(code, 'g2', [])
+ assert "TypeError:" in resg2
+ assert "'list' object is not a mapping" in resg2
+ resg3 = self.codetest(code, 'g3', [])
+ assert "TypeError:" in resg3
+ assert "keywords must be strings" in resg3
+ resg4 = self.codetest(code, 'g4', [])
+ assert "TypeError:" in resg4
+ assert "f() got multiple values for keyword argument 'a'" in resg4
class AppTestInterpreter:
diff --git a/pypy/module/_asyncio/test/test_asyncio.py b/pypy/module/_asyncio/test/test_asyncio.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_asyncio/test/test_asyncio.py
@@ -0,0 +1,14 @@
+class AppTestAsyncIO(object):
+
+ spaceconfig = dict(usemodules=["select","_socket","thread","signal","struct","_multiprocessing","array","_posixsubprocess","fcntl","unicodedata"])
+
+ def test_gil_issue(self):
+ # the problem occured at await asyncio.open_connection after calling run_until_complete
+ """
+ import encodings.idna
+ import asyncio
+ async def f():
+ reader, writer = await asyncio.open_connection('example.com', 80)
+
+ loop = asyncio.get_event_loop()
+ loop.run_until_complete(f())"""
diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py
--- a/pypy/module/thread/os_lock.py
+++ b/pypy/module/thread/os_lock.py
@@ -147,7 +147,8 @@
def set_sentinel(space):
"""Set a sentinel lock that will be released when the current thread
state is finalized (after it is untied from the interpreter)."""
- return space.wrap(Lock(space))
+ lock = allocate_lock(space)
+ return lock
class W_RLock(W_Root):
def __init__(self, space):
From pypy.commits at gmail.com Tue Aug 9 09:59:05 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 09 Aug 2016 06:59:05 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async-translate: fixed translatable
build_map_unpack(_with_call), passes tests
Message-ID: <57a9e1a9.482cc20a.13e8b.1b60@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async-translate
Changeset: r86104:ae4d7b55324b
Date: 2016-08-09 14:37 +0200
http://bitbucket.org/pypy/pypy/changeset/ae4d7b55324b/
Log: fixed translatable build_map_unpack(_with_call), passes tests
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -45,10 +45,10 @@
return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname)
-def get_func_desc(func):
- if self.space.type(func) is function.Function:
+def get_func_desc(space, func):
+ if isinstance(func,function.Function):
return "()"
- elif self.space.type(func) is function.Method:
+ elif isinstance(func, function.Method):
return "()"
else:
return " object";
@@ -1404,7 +1404,7 @@
w_dict = space.newdict()
for i in range(num_maps, 0, -1):
w_item = self.peekvalue(i-1)
- if space.lookup(w_item, '__getitem__') is None:
+ if not space.ismapping_w(w_item):
raise oefmt(space.w_TypeError,
"'%T' object is not a mapping", w_item)
iterator = w_item.iterkeys()
@@ -1416,13 +1416,13 @@
err_fun = self.peekvalue(num_maps + function_location-1)
raise oefmt(space.w_TypeError,
"%N%s keywords must be strings", err_fun,
- get_func_desc(err_fun))
+ get_func_desc(space, err_fun))
if space.is_true(space.contains(w_dict,w_key)):
err_fun = self.peekvalue(num_maps + function_location-1)
err_arg = w_key
raise oefmt(space.w_TypeError,
- "%N%s got multiple values for keyword argument %s",
- err_fun, get_func_desc(err_fun), err_arg)
+ "%N%s got multiple values for keyword argument '%s'",
+ err_fun, get_func_desc(space, err_fun), space.str_w(err_arg))
space.call_method(w_dict, 'update', w_item)
while num_maps != 0:
self.popvalue()
@@ -1434,7 +1434,7 @@
w_dict = space.newdict()
for i in range(itemcount, 0, -1):
w_item = self.peekvalue(i-1)
- if space.lookup(w_item, '__getitem__') is None:
+ if not space.ismapping_w(w_item):
raise oefmt(self.space.w_TypeError,
"'%T' object is not a mapping", w_item)
space.call_method(w_dict, 'update', w_item)
From pypy.commits at gmail.com Tue Aug 9 09:59:07 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 09 Aug 2016 06:59:07 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async-translate: some more translation
issues resolved
Message-ID: <57a9e1ab.e2efc20a.3d189.3191@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async-translate
Changeset: r86105:2ac7d3003c50
Date: 2016-08-09 15:58 +0200
http://bitbucket.org/pypy/pypy/changeset/2ac7d3003c50/
Log: some more translation issues resolved
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -59,7 +59,6 @@
w_sum = space.newlist([], sizehint=itemcount)
for i in range(itemcount, 0, -1):
w_item = frame.peekvalue(i-1)
- #items = frame.space.fixedview(w_item)
w_sum.extend(w_item)
while itemcount != 0:
frame.popvalue()
@@ -1390,8 +1389,9 @@
self.pushvalue(w_sum)
def BUILD_TUPLE_UNPACK(self, itemcount, next_instr):
- w_sum_list = list_unpack_helper(self, itemcount)
- self.pushvalue(self.space.newtuple(w_sum_list))
+ w_list = list_unpack_helper(self, itemcount)
+ items = [w_obj for w_obj in w_list.getitems_unroll()]
+ self.pushvalue(self.space.newtuple(items))
def BUILD_LIST_UNPACK(self, itemcount, next_instr):
w_sum = list_unpack_helper(self, itemcount)
@@ -1407,7 +1407,7 @@
if not space.ismapping_w(w_item):
raise oefmt(space.w_TypeError,
"'%T' object is not a mapping", w_item)
- iterator = w_item.iterkeys()
+ iterator = w_item.iterkeys(w_item)
while True:
w_key = iterator.next_key()
if w_key is None:
diff --git a/pypy/module/_asyncio/test/test_asyncio.py b/pypy/module/_asyncio/test/test_asyncio.py
--- a/pypy/module/_asyncio/test/test_asyncio.py
+++ b/pypy/module/_asyncio/test/test_asyncio.py
@@ -1,9 +1,13 @@
class AppTestAsyncIO(object):
- spaceconfig = dict(usemodules=["select","_socket","thread","signal","struct","_multiprocessing","array","_posixsubprocess","fcntl","unicodedata"])
+ spaceconfig = dict(usemodules=["select","_socket","thread","signal",
+ "struct","_multiprocessing","array",
+ "_posixsubprocess","fcntl",
+ "unicodedata"])
def test_gil_issue(self):
- # the problem occured at await asyncio.open_connection after calling run_until_complete
+ # the problem occurred at await asyncio.open_connection
+ # after calling run_until_complete
"""
import encodings.idna
import asyncio
@@ -11,4 +15,6 @@
reader, writer = await asyncio.open_connection('example.com', 80)
loop = asyncio.get_event_loop()
- loop.run_until_complete(f())"""
+ loop.run_until_complete(f())
+ print("done with async loop")
+ """
diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py
--- a/pypy/module/zipimport/interp_zipimport.py
+++ b/pypy/module/zipimport/interp_zipimport.py
@@ -82,7 +82,7 @@
for key in self.cache.keys()]
return space.newlist(items_w)
- def iterkeys(self, space):
+ def iteratekeys(self, space):
return space.iter(self.keys(space))
def itervalues(self, space):
@@ -106,11 +106,11 @@
'zip_dict',
__getitem__ = interp2app(W_ZipCache.getitem),
__contains__ = interp2app(W_ZipCache.contains),
- __iter__ = interp2app(W_ZipCache.iterkeys),
+ __iter__ = interp2app(W_ZipCache.iteratekeys),
items = interp2app(W_ZipCache.items),
iteritems = interp2app(W_ZipCache.iteritems),
keys = interp2app(W_ZipCache.keys),
- iterkeys = interp2app(W_ZipCache.iterkeys),
+ iterkeys = interp2app(W_ZipCache.iteratekeys),
values = interp2app(W_ZipCache.values),
itervalues = interp2app(W_ZipCache.itervalues),
clear = interp2app(W_ZipCache.clear),
From pypy.commits at gmail.com Tue Aug 9 10:22:56 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 09 Aug 2016 07:22:56 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async-translate: iterkeys translation
issue resolved
Message-ID: <57a9e740.94a51c0a.3a1e7.3315@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async-translate
Changeset: r86106:6f4bb70dde25
Date: 2016-08-09 16:22 +0200
http://bitbucket.org/pypy/pypy/changeset/6f4bb70dde25/
Log: iterkeys translation issue resolved
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1407,7 +1407,7 @@
if not space.ismapping_w(w_item):
raise oefmt(space.w_TypeError,
"'%T' object is not a mapping", w_item)
- iterator = w_item.iterkeys(w_item)
+ iterator = w_item.iterkeys()
while True:
w_key = iterator.next_key()
if w_key is None:
From pypy.commits at gmail.com Tue Aug 9 10:46:18 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 07:46:18 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Starting thread support:
RPY_REVDB_EMIT() should be called mostly when
Message-ID: <57a9ecba.c70a1c0a.38ae3.43ce@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86107:8009adcd405a
Date: 2016-08-09 16:45 +0200
http://bitbucket.org/pypy/pypy/changeset/8009adcd405a/
Log: Starting thread support: RPY_REVDB_EMIT() should be called mostly
when we hold the GIL, but ensuring that is messy. For now, we simply
use our own lock here.
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -11,6 +11,7 @@
#include
#include
#include
+#include
#ifdef __linux__
# define HAVE_PERSONALITY
@@ -119,17 +120,27 @@
setup_record_mode(*argc_p, *argv_p);
}
+static void reverse_db_lock_and_flush(void)
+{
+ _RPY_REVDB_LOCK();
+ rpy_reverse_db_flush();
+ _RPY_REVDB_UNLOCK();
+}
+
RPY_EXTERN
void rpy_reverse_db_teardown(void)
{
uint64_t stop_points;
- if (RPY_RDB_REPLAY) {
+ if (!RPY_RDB_REPLAY) {
+ _RPY_REVDB_LOCK();
+ }
+ else {
/* hack: prevents RPY_REVDB_EMIT() from calling
rpy_reverse_db_fetch(), which has nothing more to fetch now */
rpy_revdb.buf_limit += 1;
}
- RPY_REVDB_EMIT(stop_points = rpy_revdb.stop_point_seen; ,
- uint64_t _e, stop_points);
+ _RPY_REVDB_EMIT_L(stop_points = rpy_revdb.stop_point_seen; ,
+ uint64_t _e, stop_points, /*must_lock=*/0);
if (!RPY_RDB_REPLAY) {
rpy_reverse_db_flush();
@@ -137,6 +148,7 @@
close(rpy_rev_fileno);
rpy_rev_fileno = -1;
}
+ _RPY_REVDB_UNLOCK();
}
else
check_at_end(stop_points);
@@ -207,7 +219,7 @@
filename);
abort();
}
- atexit(rpy_reverse_db_flush);
+ atexit(reverse_db_lock_and_flush);
write_all(RDB_SIGNATURE, strlen(RDB_SIGNATURE));
for (i = 0; i < argc; i++) {
@@ -244,8 +256,12 @@
static void flush_buffer(void)
{
+ /* must be called with the lock held */
+ ssize_t full_size;
+ assert(rpy_revdb.lock);
+
/* write the current buffer content to the OS */
- ssize_t full_size = rpy_revdb.buf_p - rpy_rev_buffer;
+ full_size = rpy_revdb.buf_p - rpy_rev_buffer;
rpy_revdb.buf_p = rpy_rev_buffer + sizeof(int16_t);
if (rpy_rev_fileno >= 0)
write_all(rpy_rev_buffer, full_size);
@@ -253,13 +269,18 @@
static ssize_t current_packet_size(void)
{
+ /* must be called with the lock held */
return rpy_revdb.buf_p - (rpy_rev_buffer + sizeof(int16_t));
}
RPY_EXTERN
void rpy_reverse_db_flush(void)
{
- ssize_t content_size = current_packet_size();
+ /* must be called with the lock held */
+ ssize_t content_size;
+ assert(rpy_revdb.lock);
+
+ content_size = current_packet_size();
if (content_size != 0) {
char *p = rpy_rev_buffer;
assert(0 < content_size && content_size <= 32767);
@@ -268,6 +289,18 @@
}
}
+RPY_EXTERN
+void rpy_reverse_db_lock_acquire(void)
+{
+ while (1) {
+ if (rpy_revdb.lock == 0) {
+ if (pypy_lock_test_and_set(&rpy_revdb.lock, 1) == 0)
+ break; /* done */
+ }
+ sched_yield();
+ }
+}
+
void boehm_gc_finalizer_notifier(void)
{
/* This is called by Boehm when there are pending finalizers.
@@ -303,6 +336,7 @@
int64_t done;
/* Write an ASYNC_FINALIZER_TRIGGER packet */
+ _RPY_REVDB_LOCK();
rpy_reverse_db_flush();
assert(current_packet_size() == 0);
@@ -310,6 +344,7 @@
memcpy(rpy_revdb.buf_p, &rpy_revdb.stop_point_seen, sizeof(uint64_t));
rpy_revdb.buf_p += sizeof(uint64_t);
flush_buffer();
+ _RPY_REVDB_UNLOCK();
/* Invoke all Boehm finalizers. For new-style finalizers, this
will only cause them to move to the queues, where
@@ -364,8 +399,10 @@
static uint64_t recording_offset(void)
{
+ /* must be called with the lock held */
off_t base_offset;
ssize_t extra_size = rpy_revdb.buf_p - rpy_rev_buffer;
+ assert(rpy_revdb.lock);
if (rpy_rev_fileno < 0)
return 1;
@@ -379,7 +416,10 @@
static void patch_prev_offset(int64_t offset, char old, char new)
{
+ /* must be called with the lock held */
off_t base_offset;
+ assert(rpy_revdb.lock);
+
if (rpy_rev_fileno < 0)
return;
base_offset = lseek(rpy_rev_fileno, 0, SEEK_CUR);
@@ -452,14 +492,18 @@
/* Emit WEAKREF_AFTERWARDS_DEAD, but remember where we emit it.
If we deref the weakref and it is still alive, we will patch
it with WEAKREF_AFTERWARDS_ALIVE. */
- if (!RPY_RDB_REPLAY)
+ if (!RPY_RDB_REPLAY) {
+ _RPY_REVDB_LOCK();
r->re_off_prev = recording_offset();
+ }
else
r->re_off_prev = 1; /* any number > 0 */
- RPY_REVDB_EMIT(alive = WEAKREF_AFTERWARDS_DEAD;, char _e, alive);
+ _RPY_REVDB_EMIT_L(alive = WEAKREF_AFTERWARDS_DEAD;, char _e, alive,
+ /*must_lock=*/0);
if (!RPY_RDB_REPLAY) {
+ _RPY_REVDB_UNLOCK();
OP_BOEHM_DISAPPEARING_LINK(&r->re_addr, target, /*nothing*/);
}
else {
@@ -498,13 +542,18 @@
else {
char alive;
if (!RPY_RDB_REPLAY) {
+ _RPY_REVDB_LOCK();
patch_prev_offset(r->re_off_prev, WEAKREF_AFTERWARDS_DEAD,
WEAKREF_AFTERWARDS_ALIVE);
r->re_off_prev = recording_offset();
}
- RPY_REVDB_EMIT(alive = WEAKREF_AFTERWARDS_DEAD;, char _e, alive);
+ _RPY_REVDB_EMIT_L(alive = WEAKREF_AFTERWARDS_DEAD;, char _e, alive,
+ /*must_lock=*/0);
- if (RPY_RDB_REPLAY) {
+ if (!RPY_RDB_REPLAY) {
+ _RPY_REVDB_UNLOCK();
+ }
+ else {
switch (alive) {
case WEAKREF_AFTERWARDS_DEAD:
r->re_addr = NULL;
@@ -527,8 +576,10 @@
locnum += 300;
assert(locnum < 0xFC00);
if (!RPY_RDB_REPLAY) {
- _RPY_REVDB_EMIT_RECORD(unsigned char _e, (locnum >> 8));
- _RPY_REVDB_EMIT_RECORD(unsigned char _e, (locnum & 0xFF));
+ _RPY_REVDB_LOCK();
+ _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, (locnum >> 8));
+ _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, (locnum & 0xFF));
+ _RPY_REVDB_UNLOCK();
}
}
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -1,21 +1,21 @@
#include
+#include "src/thread.h"
-/* By default, this makes an executable which supports both recording
- and replaying. It should help avoid troubles like using for
- replaying an executable that is slightly different than the one
- used for recording. In theory you can compile with
- -DRPY_RDB_REPLAY=0 or -DRPY_RDB_REPLAY=1 to get only one version
- compiled for it, which should be slightly faster (not tested so
- far).
-*/
+/************************************************************
+ *** RevDB --- record and replay debugging ***
+ ************************************************************/
+
typedef struct {
#ifndef RPY_RDB_REPLAY
bool_t replay;
#define RPY_RDB_REPLAY rpy_revdb.replay
#define RPY_RDB_DYNAMIC_REPLAY
+#else
+# error "explicit RPY_RDB_REPLAY: not really supported"
#endif
bool_t watch_enabled;
+ long lock;
char *buf_p, *buf_limit, *buf_readend;
uint64_t stop_point_seen, stop_point_break;
uint64_t unique_id_seen, unique_id_break;
@@ -59,7 +59,19 @@
#endif
-#define _RPY_REVDB_EMIT_RECORD(decl_e, variable) \
+/* Acquire/release the lock around EMIT_RECORD, because it may be
+ called without holding the GIL. Note that we're always
+ single-threaded during replaying: the lock is only useful during
+ recording. */
+#define _RPY_REVDB_LOCK() \
+ if (pypy_lock_test_and_set(&rpy_revdb.lock, 1) != 0) \
+ rpy_reverse_db_lock_acquire();
+
+#define _RPY_REVDB_UNLOCK() \
+ pypy_lock_release(&rpy_revdb.lock)
+
+
+#define _RPY_REVDB_EMIT_RECORD_L(decl_e, variable) \
{ \
decl_e = variable; \
_RPY_REVDB_PRINT("write", _e); \
@@ -81,21 +93,28 @@
variable = _e; \
}
-#define RPY_REVDB_EMIT(normal_code, decl_e, variable) \
+#define _RPY_REVDB_EMIT_L(normal_code, decl_e, variable, must_lock) \
if (!RPY_RDB_REPLAY) { \
normal_code \
- _RPY_REVDB_EMIT_RECORD(decl_e, variable) \
+ if (must_lock) _RPY_REVDB_LOCK(); \
+ _RPY_REVDB_EMIT_RECORD_L(decl_e, variable) \
+ if (must_lock) _RPY_REVDB_UNLOCK(); \
} else \
_RPY_REVDB_EMIT_REPLAY(decl_e, variable)
+#define RPY_REVDB_EMIT(normal_code, decl_e, variable) \
+ _RPY_REVDB_EMIT_L(normal_code, decl_e, variable, 1)
+
#define RPY_REVDB_EMIT_VOID(normal_code) \
if (!RPY_RDB_REPLAY) { normal_code } else { }
#define RPY_REVDB_CALL(call_code, decl_e, variable) \
if (!RPY_RDB_REPLAY) { \
call_code \
- _RPY_REVDB_EMIT_RECORD(unsigned char _e, 0xFC) \
- _RPY_REVDB_EMIT_RECORD(decl_e, variable) \
+ _RPY_REVDB_LOCK(); \
+ _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, 0xFC) \
+ _RPY_REVDB_EMIT_RECORD_L(decl_e, variable) \
+ _RPY_REVDB_UNLOCK(); \
} else { \
unsigned char _re; \
_RPY_REVDB_EMIT_REPLAY(unsigned char _e, _re) \
@@ -107,7 +126,9 @@
#define RPY_REVDB_CALL_VOID(call_code) \
if (!RPY_RDB_REPLAY) { \
call_code \
- _RPY_REVDB_EMIT_RECORD(unsigned char _e, 0xFC) \
+ _RPY_REVDB_LOCK(); \
+ _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, 0xFC) \
+ _RPY_REVDB_UNLOCK(); \
} \
else { \
unsigned char _re; \
@@ -174,7 +195,7 @@
#define RPY_REVDB_CAST_PTR_TO_INT(obj) (((struct pypy_header0 *)obj)->h_uid)
-RPY_EXTERN void rpy_reverse_db_flush(void);
+RPY_EXTERN void rpy_reverse_db_flush(void); /* must be called with the lock */
RPY_EXTERN void rpy_reverse_db_fetch(const char *file, int line);
RPY_EXTERN void rpy_reverse_db_stop_point(long place);
RPY_EXTERN void rpy_reverse_db_send_answer(int cmd, int64_t arg1, int64_t arg2,
@@ -193,5 +214,6 @@
RPY_EXTERN void rpy_reverse_db_call_destructor(void *obj);
RPY_EXTERN void rpy_reverse_db_invoke_callback(unsigned char);
RPY_EXTERN void rpy_reverse_db_callback_loc(int);
+RPY_EXTERN void rpy_reverse_db_lock_acquire(void);
/* ------------------------------------------------------------ */
From pypy.commits at gmail.com Tue Aug 9 10:52:34 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 07:52:34 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Fix test_dictproxy.py to
actually match the expected behaviour
Message-ID: <57a9ee32.c1e31c0a.c1830.168b@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86108:50212a6235fd
Date: 2016-08-03 04:59 +0100
http://bitbucket.org/pypy/pypy/changeset/50212a6235fd/
Log: Fix test_dictproxy.py to actually match the expected behaviour
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -9,37 +9,19 @@
assert 'a' in NotEmpty.__dict__
assert 'a' in NotEmpty.__dict__.keys()
assert 'b' not in NotEmpty.__dict__
- NotEmpty.__dict__['b'] = 4
- assert NotEmpty.b == 4
- del NotEmpty.__dict__['b']
assert NotEmpty.__dict__.get("b") is None
+ raises(TypeError, "NotEmpty.__dict__['b'] = 4")
raises(TypeError, 'NotEmpty.__dict__[15] = "y"')
- raises(KeyError, 'del NotEmpty.__dict__[15]')
+ raises(TypeError, 'del NotEmpty.__dict__[15]')
- assert NotEmpty.__dict__.setdefault("string", 1) == 1
- assert NotEmpty.__dict__.setdefault("string", 2) == 1
- assert NotEmpty.string == 1
- raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)')
-
- def test_dictproxy_popitem(self):
- class A(object):
- a = 42
- seen = 0
- try:
- while True:
- key, value = A.__dict__.popitem()
- if key == 'a':
- assert value == 42
- seen += 1
- except KeyError:
- pass
- assert seen == 1
+ raises(AttributeError, 'NotEmpty.__dict__.setdefault')
def test_dictproxy_getitem(self):
class NotEmpty(object):
a = 1
assert 'a' in NotEmpty.__dict__
- class substr(str): pass
+ class substr(str):
+ pass
assert substr('a') in NotEmpty.__dict__
assert u'a' in NotEmpty.__dict__
assert NotEmpty.__dict__[u'a'] == 1
@@ -62,15 +44,37 @@
class a(object):
pass
s1 = repr(a.__dict__)
+ assert s1.startswith('dict_proxy({') and s1.endswith('})')
s2 = str(a.__dict__)
- assert s1 == s2
- assert s1.startswith('{') and s1.endswith('}')
+ assert s1 == 'dict_proxy(%s)' % s2
def test_immutable_dict_on_builtin_type(self):
raises(TypeError, "int.__dict__['a'] = 1")
- raises(TypeError, int.__dict__.popitem)
- raises(TypeError, int.__dict__.clear)
+ raises((AttributeError, TypeError), "int.__dict__.popitem()")
+ raises((AttributeError, TypeError), "int.__dict__.clear()")
+
+ def test_dictproxy(self):
+ dictproxy = type(int.__dict__)
+ assert dictproxy is not dict
+ assert dictproxy.__name__ == 'dictproxy'
+ raises(TypeError, dictproxy)
+
+ mapping = {'a': 1}
+ raises(TypeError, dictproxy, mapping)
+
+ class A(object):
+ a = 1
+
+ proxy = A.__dict__
+ mapping = dict(proxy)
+ assert proxy['a'] == 1
+ assert 'a' in proxy
+ assert 'z' not in proxy
+ assert repr(proxy) == 'dict_proxy(%r)' % mapping
+ assert proxy.keys() == mapping.keys()
+ raises(TypeError, "proxy['a'] = 4")
+ raises(TypeError, "del proxy['a']")
+ raises(AttributeError, "proxy.clear()")
class AppTestUserObjectMethodCache(AppTestUserObject):
spaceconfig = {"objspace.std.withmethodcachecounter": True}
-
From pypy.commits at gmail.com Tue Aug 9 10:52:36 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 07:52:36 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Rename DictProxyStrategy to
ClassDictStrategy and dictproxyobject.py to classdict.py
Message-ID: <57a9ee34.2624c20a.7a5a1.45ba@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86109:82c4a72260a7
Date: 2016-08-02 20:01 +0100
http://bitbucket.org/pypy/pypy/changeset/82c4a72260a7/
Log: Rename DictProxyStrategy to ClassDictStrategy and dictproxyobject.py
to classdict.py
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/classdict.py
rename from pypy/objspace/std/dictproxyobject.py
rename to pypy/objspace/std/classdict.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/classdict.py
@@ -7,7 +7,7 @@
from pypy.objspace.std.typeobject import unwrap_cell
-class DictProxyStrategy(DictStrategy):
+class ClassDictStrategy(DictStrategy):
erase, unerase = rerased.new_erasing_pair("dictproxy")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
@@ -112,4 +112,4 @@
def wrapvalue(space, value):
return unwrap_cell(space, value)
-create_iterator_classes(DictProxyStrategy)
+create_iterator_classes(ClassDictStrategy)
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -486,11 +486,11 @@
del self.lazyloaders
def getdict(self, space): # returning a dict-proxy!
- from pypy.objspace.std.dictproxyobject import DictProxyStrategy
+ from pypy.objspace.std.classdict import ClassDictStrategy
from pypy.objspace.std.dictmultiobject import W_DictObject
if self.lazyloaders:
self._cleanup_() # force un-lazification
- strategy = space.fromcache(DictProxyStrategy)
+ strategy = space.fromcache(ClassDictStrategy)
storage = strategy.erase(self)
return W_DictObject(space, strategy, storage)
From pypy.commits at gmail.com Tue Aug 9 10:52:39 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 07:52:39 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Do not attempt to modify a
class dict, use setattr instead
Message-ID: <57a9ee37.cb7f1c0a.705ce.3d11@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86111:d10201ca7dc8
Date: 2016-08-04 16:29 +0100
http://bitbucket.org/pypy/pypy/changeset/d10201ca7dc8/
Log: Do not attempt to modify a class dict, use setattr instead
diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py
--- a/pypy/module/cppyy/pythonify.py
+++ b/pypy/module/cppyy/pythonify.py
@@ -175,7 +175,7 @@
"__new__" : make_new(class_name),
}
pycppclass = metacpp(class_name, _drop_cycles(bases), d)
-
+
# cache result early so that the class methods can find the class itself
setattr(scope, final_class_name, pycppclass)
@@ -192,13 +192,10 @@
for dm_name in cppclass.get_datamember_names():
cppdm = cppclass.get_datamember(dm_name)
- # here, setattr() can not be used, because a data member can shadow one in
- # its base class, resulting in the __set__() of its base class being called
- # by setattr(); so, store directly on the dictionary
- pycppclass.__dict__[dm_name] = cppdm
+ setattr(pycppclass, dm_name, cppdm)
import cppyy
if cppyy._is_static(cppdm): # TODO: make this a method of cppdm
- metacpp.__dict__[dm_name] = cppdm
+ setattr(metacpp, dm_name, cppdm)
# the call to register will add back-end specific pythonizations and thus
# needs to run first, so that the generic pythonizations can use them
@@ -413,7 +410,7 @@
lib = cppyy._load_dictionary(name)
_loaded_dictionaries[name] = lib
return lib
-
+
def _init_pythonify():
# cppyy should not be loaded at the module level, as that will trigger a
# call to space.getbuiltinmodule(), which will cause cppyy to be loaded
From pypy.commits at gmail.com Tue Aug 9 10:52:41 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 07:52:41 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Add failing cpyext test
clarifying the expected behaviour of type->tp_dict
Message-ID: <57a9ee39.c3881c0a.4cd4f.464f@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86112:b2d8c440375b
Date: 2016-08-04 18:28 +0100
http://bitbucket.org/pypy/pypy/changeset/b2d8c440375b/
Log: Add failing cpyext test clarifying the expected behaviour of
type->tp_dict
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -282,11 +282,23 @@
args->ob_type->tp_dict, "copy");
Py_INCREF(method);
return method;
+ '''),
+ ("get_type_dict", "METH_O",
'''
- )
+ PyObject* value = args->ob_type->tp_dict;
+ if (value == NULL) value = Py_None;
+ Py_INCREF(value);
+ return value;
+ '''),
])
obj = foo.new()
assert module.read_tp_dict(obj) == foo.fooType.copy
+ assert type(module.get_type_dict(obj)) is dict
+ d = module.get_type_dict(1)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert int._some_attribute == 1
+ del d["_some_attribute"]
def test_custom_allocation(self):
foo = self.import_module("foo")
From pypy.commits at gmail.com Tue Aug 9 10:52:37 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 07:52:37 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Backport W_DictProxyObject
from py3k
Message-ID: <57a9ee35.17a71c0a.822ba.3f84@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86110:786fadddf616
Date: 2016-08-09 15:15 +0100
http://bitbucket.org/pypy/pypy/changeset/786fadddf616/
Log: Backport W_DictProxyObject from py3k
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -0,0 +1,86 @@
+# Read-only proxy for mappings. PyPy does not have a separate type for
+# type.__dict__, so PyDictProxy_New has to use a custom read-only mapping.
+
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.error import oefmt
+from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
+from pypy.interpreter.typedef import TypeDef, interp2app
+
+class W_DictProxyObject(W_Root):
+ "Read-only proxy for mappings."
+
+ def __init__(self, w_mapping):
+ self.w_mapping = w_mapping
+
+ @staticmethod
+ def descr_new(space, w_type, w_mapping):
+ raise oefmt(space.w_TypeError, "Cannot create 'dictproxy' instances")
+
+ def descr_init(self, space, __args__):
+ pass
+
+ def descr_len(self, space):
+ return space.len(self.w_mapping)
+
+ def descr_getitem(self, space, w_key):
+ return space.getitem(self.w_mapping, w_key)
+
+ def descr_contains(self, space, w_key):
+ return space.contains(self.w_mapping, w_key)
+
+ def descr_iter(self, space):
+ return space.iter(self.w_mapping)
+
+ def descr_str(self, space):
+ return space.str(self.w_mapping)
+
+ def descr_repr(self, space):
+ return space.wrap("dict_proxy(%s)" %
+ (space.str_w(space.repr(self.w_mapping)),))
+
+ @unwrap_spec(w_default=WrappedDefault(None))
+ def get_w(self, space, w_key, w_default):
+ return space.call_method(self.w_mapping, "get", w_key, w_default)
+
+ def keys_w(self, space):
+ return space.call_method(self.w_mapping, "keys")
+
+ def values_w(self, space):
+ return space.call_method(self.w_mapping, "values")
+
+ def items_w(self, space):
+ return space.call_method(self.w_mapping, "items")
+
+ def copy_w(self, space):
+ return space.call_method(self.w_mapping, "copy")
+
+cmp_methods = {}
+def make_cmp_method(op):
+ def descr_op(self, space, w_other):
+ return getattr(space, op)(self.w_mapping, w_other)
+ descr_name = 'descr_' + op
+ descr_op.__name__ = descr_name
+ setattr(W_DictProxyObject, descr_name, descr_op)
+ cmp_methods['__%s__' % op] = interp2app(getattr(W_DictProxyObject, descr_name))
+
+for op in ['eq', 'ne', 'gt', 'ge', 'lt', 'le']:
+ make_cmp_method(op)
+
+
+W_DictProxyObject.typedef = TypeDef(
+ 'dictproxy',
+ __new__=interp2app(W_DictProxyObject.descr_new),
+ __init__=interp2app(W_DictProxyObject.descr_init),
+ __len__=interp2app(W_DictProxyObject.descr_len),
+ __getitem__=interp2app(W_DictProxyObject.descr_getitem),
+ __contains__=interp2app(W_DictProxyObject.descr_contains),
+ __iter__=interp2app(W_DictProxyObject.descr_iter),
+ __str__=interp2app(W_DictProxyObject.descr_str),
+ __repr__=interp2app(W_DictProxyObject.descr_repr),
+ get=interp2app(W_DictProxyObject.get_w),
+ keys=interp2app(W_DictProxyObject.keys_w),
+ values=interp2app(W_DictProxyObject.values_w),
+ items=interp2app(W_DictProxyObject.items_w),
+ copy=interp2app(W_DictProxyObject.copy_w),
+ **cmp_methods
+)
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -968,7 +968,6 @@
raises(TypeError, setattr, list, 'foobar', 42)
raises(TypeError, delattr, dict, 'keys')
raises(TypeError, 'int.__dict__["a"] = 1')
- raises(TypeError, 'int.__dict__.clear()')
def test_nontype_in_mro(self):
class OldStyle:
@@ -1026,10 +1025,9 @@
pass
a = A()
+ d = A.__dict__
A.x = 1
- assert A.__dict__["x"] == 1
- A.__dict__['x'] = 5
- assert A.x == 5
+ assert d["x"] == 1
def test_we_already_got_one_1(self):
# Issue #2079: highly obscure: CPython complains if we say
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -488,11 +488,13 @@
def getdict(self, space): # returning a dict-proxy!
from pypy.objspace.std.classdict import ClassDictStrategy
from pypy.objspace.std.dictmultiobject import W_DictObject
+ from pypy.objspace.std.dictproxyobject import W_DictProxyObject
if self.lazyloaders:
self._cleanup_() # force un-lazification
strategy = space.fromcache(ClassDictStrategy)
storage = strategy.erase(self)
- return W_DictObject(space, strategy, storage)
+ w_dict = W_DictObject(space, strategy, storage)
+ return W_DictProxyObject(w_dict)
def is_heaptype(self):
return self.flag_heaptype
From pypy.commits at gmail.com Tue Aug 9 10:52:47 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 07:52:47 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Cleanup
Message-ID: <57a9ee3f.c70a1c0a.38ae3.46ca@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86115:9d629ea0f59c
Date: 2016-08-08 18:45 +0100
http://bitbucket.org/pypy/pypy/changeset/9d629ea0f59c/
Log: Cleanup
diff --git a/pypy/objspace/std/classdict.py b/pypy/objspace/std/classdict.py
--- a/pypy/objspace/std/classdict.py
+++ b/pypy/objspace/std/classdict.py
@@ -85,7 +85,8 @@
return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys())
def values(self, w_dict):
- return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()]
+ return [unwrap_cell(self.space, w_value) for w_value in
+ self.unerase(w_dict.dstorage).dict_w.itervalues()]
def items(self, w_dict):
space = self.space
@@ -103,12 +104,16 @@
def getiterkeys(self, w_dict):
return self.unerase(w_dict.dstorage).dict_w.iterkeys()
+
def getitervalues(self, w_dict):
return self.unerase(w_dict.dstorage).dict_w.itervalues()
+
def getiteritems_with_hash(self, w_dict):
return iteritems_with_hash(self.unerase(w_dict.dstorage).dict_w)
+
def wrapkey(space, key):
return space.wrap(key)
+
def wrapvalue(space, value):
return unwrap_cell(space, value)
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -1,5 +1,8 @@
-# Read-only proxy for mappings. PyPy does not have a separate type for
-# type.__dict__, so PyDictProxy_New has to use a custom read-only mapping.
+"""
+Read-only proxy for mappings.
+
+Its main use is as the return type of cls.__dict__.
+"""
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import oefmt
From pypy.commits at gmail.com Tue Aug 9 10:52:43 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 07:52:43 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Make cls->tp_dict be a real
dict while cls.__dict__ returns a proxy
Message-ID: <57a9ee3b.919a1c0a.5a71a.4b87@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86113:3555af06864f
Date: 2016-08-04 18:58 +0100
http://bitbucket.org/pypy/pypy/changeset/3555af06864f/
Log: Make cls->tp_dict be a real dict while cls.__dict__ returns a proxy
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -293,12 +293,21 @@
])
obj = foo.new()
assert module.read_tp_dict(obj) == foo.fooType.copy
- assert type(module.get_type_dict(obj)) is dict
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
d = module.get_type_dict(1)
assert type(d) is dict
- d["_some_attribute"] = 1
- assert int._some_attribute == 1
- del d["_some_attribute"]
+ try:
+ d["_some_attribute"] = 1
+ except TypeError: # on PyPy, int.__dict__ is really immutable
+ pass
+ else:
+ assert int._some_attribute == 1
+ del d["_some_attribute"]
def test_custom_allocation(self):
foo = self.import_module("foo")
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -272,12 +272,12 @@
if len(slot_names) == 1:
if not getattr(pto, slot_names[0]):
setattr(pto, slot_names[0], slot_func_helper)
- elif (w_type.getname(space) in ('list', 'tuple') and
+ elif (w_type.getname(space) in ('list', 'tuple') and
slot_names[0] == 'c_tp_as_number'):
# XXX hack - hwo can we generalize this? The problem is method
# names like __mul__ map to more than one slot, and we have no
# convenient way to indicate which slots CPython have filled
- #
+ #
# We need at least this special case since Numpy checks that
# (list, tuple) do __not__ fill tp_as_number
pass
@@ -860,8 +860,8 @@
if w_obj.is_cpytype():
Py_DecRef(space, pto.c_tp_dict)
- w_dict = w_obj.getdict(space)
- pto.c_tp_dict = make_ref(space, w_dict)
+ w_dict = w_obj.getdict(space)
+ pto.c_tp_dict = make_ref(space, w_dict)
@cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL)
def PyType_IsSubtype(space, a, b):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -3,8 +3,8 @@
from pypy.interpreter.baseobjspace import W_Root, SpaceCache
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import Function, StaticMethod
-from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\
- descr_get_dict, dict_descr, Member, TypeDef
+from pypy.interpreter.typedef import (
+ weakref_descr, GetSetProperty, dict_descr, Member, TypeDef)
from pypy.interpreter.astcompiler.misc import mangle
from pypy.module.__builtin__ import abstractinst
@@ -485,16 +485,14 @@
self.getdictvalue(self.space, attr)
del self.lazyloaders
- def getdict(self, space): # returning a dict-proxy!
+ def getdict(self, space):
from pypy.objspace.std.classdict import ClassDictStrategy
from pypy.objspace.std.dictmultiobject import W_DictObject
- from pypy.objspace.std.dictproxyobject import W_DictProxyObject
if self.lazyloaders:
self._cleanup_() # force un-lazification
strategy = space.fromcache(ClassDictStrategy)
storage = strategy.erase(self)
- w_dict = W_DictObject(space, strategy, storage)
- return W_DictProxyObject(w_dict)
+ return W_DictObject(space, strategy, storage)
def is_heaptype(self):
return self.flag_heaptype
@@ -912,13 +910,20 @@
return space.newbool(
abstractinst.p_recursive_isinstance_type_w(space, w_inst, w_obj))
+def type_get_dict(space, w_cls):
+ from pypy.objspace.std.dictproxyobject import W_DictProxyObject
+ w_dict = w_cls.getdict(space)
+ if w_dict is None:
+ return space.w_None
+ return W_DictProxyObject(w_dict)
+
W_TypeObject.typedef = TypeDef("type",
__new__ = gateway.interp2app(descr__new__),
__name__ = GetSetProperty(descr_get__name__, descr_set__name__),
__bases__ = GetSetProperty(descr_get__bases__, descr_set__bases__),
__base__ = GetSetProperty(descr__base),
__mro__ = GetSetProperty(descr_get__mro__),
- __dict__ = GetSetProperty(descr_get_dict),
+ __dict__=GetSetProperty(type_get_dict),
__doc__ = GetSetProperty(descr__doc),
mro = gateway.interp2app(descr_mro),
__flags__ = GetSetProperty(descr__flags),
From pypy.commits at gmail.com Tue Aug 9 10:52:45 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 07:52:45 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Allow attribute deletion on
C-defined types
Message-ID: <57a9ee3d.12331c0a.ea209.47ae@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86114:5cb67984053b
Date: 2016-08-05 16:47 +0100
http://bitbucket.org/pypy/pypy/changeset/5cb67984053b/
Log: Allow attribute deletion on C-defined types
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -299,6 +299,15 @@
assert type(obj)._some_attribute == 1
del d["_some_attribute"]
+ class A(object):
+ pass
+ obj = A()
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
d = module.get_type_dict(1)
assert type(d) is dict
try:
@@ -376,6 +385,21 @@
api.Py_DecRef(ref)
+ def test_type_dict(self, space, api):
+ w_class = space.appexec([], """():
+ class A(object):
+ pass
+ return A
+ """)
+ ref = make_ref(space, w_class)
+
+ py_type = rffi.cast(PyTypeObjectPtr, ref)
+ w_dict = from_ref(space, py_type.c_tp_dict)
+ w_name = space.wrap('a')
+ space.setitem(w_dict, w_name, space.wrap(1))
+ assert space.int_w(space.getattr(w_class, w_name)) == 1
+ space.delitem(w_dict, w_name)
+
def test_multiple_inheritance(self, space, api):
w_class = space.appexec([], """():
class A(object):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -346,7 +346,7 @@
def deldictvalue(self, space, key):
if self.lazyloaders:
self._cleanup_() # force un-lazification
- if not self.is_heaptype():
+ if not (self.is_heaptype() or self.is_cpytype()):
raise oefmt(space.w_TypeError,
"can't delete attributes on type object '%N'", self)
try:
From pypy.commits at gmail.com Tue Aug 9 11:38:55 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 09 Aug 2016 08:38:55 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async-translate: assert is instance Module,
rpython deduces it is a W_Root
Message-ID: <57a9f90f.09afc20a.998b.433f@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async-translate
Changeset: r86116:435335a96064
Date: 2016-08-09 17:38 +0200
http://bitbucket.org/pypy/pypy/changeset/435335a96064/
Log: assert is instance Module, rpython deduces it is a W_Root properly
wrap for load const
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -1310,7 +1310,7 @@
nsubkwargs += 1
elif nsubkwargs:
# A keyword argument and we already have a dict.
- self.load_const(kw.arg)
+ self.load_const(self.space.wrap(kw.arg.decode('utf-8')))
kw.value.walkabout(self)
nseen += 1
else:
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -85,6 +85,7 @@
pathname = "" % modulename
code_w = ec.compiler.compile(source, pathname, 'exec', 0)
w_mod = add_module(space, space.wrap(modulename))
+ assert isinstance(w_mod, Module) # XXX why is that necessary?
space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod)
space.setitem(w_mod.w_dict, space.wrap('__name__'), w_mod.w_name)
code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict)
From pypy.commits at gmail.com Tue Aug 9 11:53:55 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Tue, 09 Aug 2016 08:53:55 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Merge with py3.5-async-translate
Message-ID: <57a9fc93.a717c20a.155f5.591a@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86117:b7ea325def94
Date: 2016-08-09 17:53 +0200
http://bitbucket.org/pypy/pypy/changeset/b7ea325def94/
Log: Merge with py3.5-async-translate
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -1310,7 +1310,7 @@
nsubkwargs += 1
elif nsubkwargs:
# A keyword argument and we already have a dict.
- self.load_const(kw.arg)
+ self.load_const(self.space.wrap(kw.arg.decode('utf-8')))
kw.value.walkabout(self)
nseen += 1
else:
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -581,12 +581,13 @@
# position, then raise a GeneratorExit. Otherwise, there is
# no point.
# If coroutine was never awaited on issue a RuntimeWarning.
- if self.pycode is not None:
- if self.frame is not None:
- if self.frame.fget_f_lasti(self.frame).int_w(self.space) == -1:
- raise oefmt(space.w_RuntimeWarning,
- "coroutine '%s' was never awaited",
- self.pycode.co_name)
+ if self.pycode is not None and \
+ self.frame is not None and \
+ self.frame.last_instr == -1:
+ # XXX PyErr_Occured in condition?
+ raise oefmt(self.space.w_RuntimeWarning,
+ "coroutine '%s' was never awaited",
+ self.pycode.co_name)
if self.frame is not None:
block = self.frame.lastblock
while block is not None:
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -6,7 +6,7 @@
from rpython.rlib import jit, rstackovf, rstring
from rpython.rlib.debug import check_nonneg
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, always_inline
from rpython.rlib.rarithmetic import r_uint, intmask
from rpython.tool.sourcetools import func_with_new_name
@@ -45,6 +45,26 @@
return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname)
+def get_func_desc(space, func):
+ if isinstance(func,function.Function):
+ return "()"
+ elif isinstance(func, function.Method):
+ return "()"
+ else:
+ return " object";
+
+ at always_inline
+def list_unpack_helper(frame, itemcount):
+ space = frame.space
+ w_sum = space.newlist([], sizehint=itemcount)
+ for i in range(itemcount, 0, -1):
+ w_item = frame.peekvalue(i-1)
+ w_sum.extend(w_item)
+ while itemcount != 0:
+ frame.popvalue()
+ itemcount -= 1
+ return w_sum
+
opcodedesc = bytecode_spec.opcodedesc
HAVE_ARGUMENT = bytecode_spec.HAVE_ARGUMENT
@@ -1351,74 +1371,73 @@
self.space.call_method(w_set, 'add', w_item)
self.pushvalue(w_set)
- def unpack_helper(self, itemcount, next_instr):
- w_sum = []
+ def BUILD_SET_UNPACK(self, itemcount, next_instr):
+ space = self.space
+ w_sum = space.newset()
for i in range(itemcount, 0, -1):
w_item = self.peekvalue(i-1)
- items = self.space.fixedview(w_item)
- w_sum.extend(items)
+ # cannot use w_sum.update, w_item might not be a set
+ iterator = w_item.itervalues()
+ while True:
+ w_value = iterator.next_value()
+ if w_value is None:
+ break
+ w_sum.add(w_value)
while itemcount != 0:
self.popvalue()
itemcount -= 1
- return w_sum
-
- def BUILD_SET_UNPACK(self, itemcount, next_instr):
- w_sum = self.unpack_helper(itemcount, next_instr)
- self.pushvalue(self.space.newset(w_sum))
+ self.pushvalue(w_sum)
def BUILD_TUPLE_UNPACK(self, itemcount, next_instr):
- w_sum = self.unpack_helper(itemcount, next_instr)
- self.pushvalue(self.space.newtuple(w_sum))
-
+ w_list = list_unpack_helper(self, itemcount)
+ items = [w_obj for w_obj in w_list.getitems_unroll()]
+ self.pushvalue(self.space.newtuple(items))
+
def BUILD_LIST_UNPACK(self, itemcount, next_instr):
- w_sum = self.unpack_helper(itemcount, next_instr)
- self.pushvalue(self.space.newlist(w_sum))
-
- def getFuncDesc(self, func):
- if self.space.type(func).name.decode('utf-8') == 'method':
- return "()"
- elif self.space.type(func).name.decode('utf-8') == 'function':
- return "()"
- else:
- return " object";
-
+ w_sum = list_unpack_helper(self, itemcount)
+ self.pushvalue(w_sum)
+
def BUILD_MAP_UNPACK_WITH_CALL(self, itemcount, next_instr):
+ space = self.space
num_maps = itemcount & 0xff
function_location = (itemcount>>8) & 0xff
- w_dict = self.space.newdict()
- dict_class = w_dict.__class__
+ w_dict = space.newdict()
for i in range(num_maps, 0, -1):
w_item = self.peekvalue(i-1)
- if not issubclass(w_item.__class__, dict_class):
- raise oefmt(self.space.w_TypeError,
+ if not space.ismapping_w(w_item):
+ raise oefmt(space.w_TypeError,
"'%T' object is not a mapping", w_item)
- num_items = w_item.length()
- keys = w_item.w_keys()
- for j in range(num_items):
- if self.space.type(keys.getitem(j)).name.decode('utf-8') == 'method':
+ iterator = w_item.iterkeys()
+ while True:
+ w_key = iterator.next_key()
+ if w_key is None:
+ break
+ if not isinstance(w_key, space.UnicodeObjectCls):
err_fun = self.peekvalue(num_maps + function_location-1)
- raise oefmt(self.space.w_TypeError,
- "%N%s keywords must be strings", err_fun, getFuncDesc(err_fun))
- if self.space.is_true(self.space.contains(w_dict,keys.getitem(j))):
+ raise oefmt(space.w_TypeError,
+ "%N%s keywords must be strings", err_fun,
+ get_func_desc(space, err_fun))
+ if space.is_true(space.contains(w_dict,w_key)):
err_fun = self.peekvalue(num_maps + function_location-1)
- err_arg = self.space.unicode_w(keys.getitem(j))
- raise oefmt(self.space.w_TypeError,
- "%N%s got multiple values for keyword argument '%s'", err_fun, self.getFuncDesc(err_fun), err_arg)
- self.space.call_method(w_dict, 'update', w_item)
+ err_arg = w_key
+ raise oefmt(space.w_TypeError,
+ "%N%s got multiple values for keyword argument '%s'",
+ err_fun, get_func_desc(space, err_fun), space.str_w(err_arg))
+ space.call_method(w_dict, 'update', w_item)
while num_maps != 0:
self.popvalue()
num_maps -= 1
self.pushvalue(w_dict)
-
+
def BUILD_MAP_UNPACK(self, itemcount, next_instr):
- w_dict = self.space.newdict()
- dict_class = w_dict.__class__
+ space = self.space
+ w_dict = space.newdict()
for i in range(itemcount, 0, -1):
w_item = self.peekvalue(i-1)
- if not issubclass(w_item.__class__, dict_class):
+ if not space.ismapping_w(w_item):
raise oefmt(self.space.w_TypeError,
"'%T' object is not a mapping", w_item)
- self.space.call_method(w_dict, 'update', w_item)
+ space.call_method(w_dict, 'update', w_item)
while itemcount != 0:
self.popvalue()
itemcount -= 1
diff --git a/pypy/module/_asyncio/test/test_asyncio.py b/pypy/module/_asyncio/test/test_asyncio.py
--- a/pypy/module/_asyncio/test/test_asyncio.py
+++ b/pypy/module/_asyncio/test/test_asyncio.py
@@ -1,9 +1,13 @@
class AppTestAsyncIO(object):
- spaceconfig = dict(usemodules=["select","_socket","thread","signal","struct","_multiprocessing","array","_posixsubprocess","fcntl","unicodedata"])
+ spaceconfig = dict(usemodules=["select","_socket","thread","signal",
+ "struct","_multiprocessing","array",
+ "_posixsubprocess","fcntl",
+ "unicodedata"])
def test_gil_issue(self):
- # the problem occured at await asyncio.open_connection after calling run_until_complete
+ # the problem occured at await asyncio.open_connection
+ # after calling run_until_complete
"""
import encodings.idna
import asyncio
@@ -11,4 +15,6 @@
reader, writer = await asyncio.open_connection('example.com', 80)
loop = asyncio.get_event_loop()
- loop.run_until_complete(f())"""
+ loop.run_until_complete(f())
+ print("done with async loop")
+ """
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -85,6 +85,7 @@
pathname = "" % modulename
code_w = ec.compiler.compile(source, pathname, 'exec', 0)
w_mod = add_module(space, space.wrap(modulename))
+ assert isinstance(w_mod, Module) # XXX why is that necessary?
space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod)
space.setitem(w_mod.w_dict, space.wrap('__name__'), w_mod.w_name)
code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict)
diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py
--- a/pypy/module/zipimport/interp_zipimport.py
+++ b/pypy/module/zipimport/interp_zipimport.py
@@ -82,7 +82,7 @@
for key in self.cache.keys()]
return space.newlist(items_w)
- def iterkeys(self, space):
+ def iteratekeys(self, space):
return space.iter(self.keys(space))
def itervalues(self, space):
@@ -106,11 +106,11 @@
'zip_dict',
__getitem__ = interp2app(W_ZipCache.getitem),
__contains__ = interp2app(W_ZipCache.contains),
- __iter__ = interp2app(W_ZipCache.iterkeys),
+ __iter__ = interp2app(W_ZipCache.iteratekeys),
items = interp2app(W_ZipCache.items),
iteritems = interp2app(W_ZipCache.iteritems),
keys = interp2app(W_ZipCache.keys),
- iterkeys = interp2app(W_ZipCache.iterkeys),
+ iterkeys = interp2app(W_ZipCache.iteratekeys),
values = interp2app(W_ZipCache.values),
itervalues = interp2app(W_ZipCache.itervalues),
clear = interp2app(W_ZipCache.clear),
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
--- a/pypy/objspace/std/memoryobject.py
+++ b/pypy/objspace/std/memoryobject.py
@@ -198,7 +198,6 @@
def get_native_fmtchar(self, fmt):
from rpython.rtyper.lltypesystem import rffi
- from sys import getsizeof
size = -1
if fmt[0] == '@':
f = fmt[1]
@@ -215,7 +214,7 @@
elif f == 'q' or f == 'Q':
size = rffi.sizeof(rffi.LONGLONG)
elif f == 'n' or f == 'N':
- size = getsizeof(rffi.r_ssize_t)
+ size = rffi.sizeof(rffi.SIZE_T)
elif f == 'f':
size = rffi.sizeof(rffi.FLOAT)
elif f == 'd':
@@ -225,7 +224,7 @@
elif f == 'P':
size = rffi.sizeof(rffi.VOIDP)
return size
-
+
def descr_cast(self, space, w_format, w_shape=None):
# XXX fixme. does not do anything near cpython (see memoryobjet.c memory_cast)
self._check_released(space)
From pypy.commits at gmail.com Tue Aug 9 12:05:55 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 09:05:55 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: in-progress
Message-ID: <57a9ff63.a719c20a.af26b.fdb1@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86118:9a959ce7e35a
Date: 2016-08-09 18:05 +0200
http://bitbucket.org/pypy/pypy/changeset/9a959ce7e35a/
Log: in-progress
diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py
--- a/rpython/config/translationoption.py
+++ b/rpython/config/translationoption.py
@@ -284,7 +284,6 @@
requires=[('translation.split_gc_address_space', True),
('translation.jit', False),
('translation.gc', 'boehm'),
- ('translation.thread', False),
('translation.continuation', False)]),
])
diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h
--- a/rpython/translator/c/src/thread.h
+++ b/rpython/translator/c/src/thread.h
@@ -26,6 +26,10 @@
#endif /* !_WIN32 */
+#ifdef RPY_REVERSE_DEBUGGER
+RPY_EXTERN void rpy_reverse_db_thread_switch(void);
+#endif
+
RPY_EXTERN void RPyGilAllocate(void);
RPY_EXTERN long RPyGilYieldThread(void);
RPY_EXTERN void RPyGilAcquireSlowPath(long);
@@ -45,6 +49,9 @@
long old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1);
if (old_fastgil != 0)
RPyGilAcquireSlowPath(old_fastgil);
+#ifdef RPY_REVERSE_DEBUGGER
+ rpy_reverse_db_thread_switch();
+#endif
}
static inline void _RPyGilRelease(void) {
assert(RPY_FASTGIL_LOCKED(rpy_fastgil));
diff --git a/rpython/translator/revdb/gencsupp.py b/rpython/translator/revdb/gencsupp.py
--- a/rpython/translator/revdb/gencsupp.py
+++ b/rpython/translator/revdb/gencsupp.py
@@ -82,9 +82,10 @@
'_revdb_do_all_calls_', False):
return call_code # a hack for ll_call_destructor() to mean
# that the calls should really be done
- # haaaaack
+ #
+ # hack: we don't need the flag for at least these two common functions
if call_code in ('RPyGilRelease();', 'RPyGilAcquire();'):
- return '/* ' + call_code + ' */'
+ return 'RPY_REVDB_CALL_GILCTRL(%s);' % (call_code,)
#
tp = funcgen.lltypename(v_result)
if tp == 'void @':
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -35,6 +35,7 @@
#define WEAKREF_AFTERWARDS_ALIVE ((char)0xeb)
#define ASYNC_FINALIZER_TRIGGER ((int16_t)0xff46)
+#define ASYNC_THREAD_SWITCH ((int16_t)0xff54)
#define FID_REGULAR_MODE 'R'
#define FID_SAVED_STATE 'S'
@@ -55,6 +56,8 @@
static char rpy_rev_buffer[16384]; /* max. 32768 */
int rpy_rev_fileno = -1;
static char flag_io_disabled = FID_REGULAR_MODE;
+static pthread_t current_logged_thread;
+static bool_t current_logged_thread_seen;
static void setup_record_mode(int argc, char *argv[]);
@@ -321,6 +324,21 @@
static long in_invoke_finalizers;
+static void emit_async_block(int async_code, uint64_t content)
+{
+ char *p = rpy_rev_buffer;
+
+ _RPY_REVDB_LOCK();
+ rpy_reverse_db_flush();
+ assert(current_packet_size() == 0);
+
+ *(int16_t *)p = async_code;
+ memcpy(rpy_revdb.buf_p, &content, sizeof(uint64_t));
+ rpy_revdb.buf_p += sizeof(uint64_t);
+ flush_buffer();
+ _RPY_REVDB_UNLOCK();
+}
+
static void record_stop_point(void)
{
/* ===== FINALIZERS =====
@@ -332,19 +350,10 @@
conceptually just *after* the stop point.
*/
int i;
- char *p = rpy_rev_buffer;
int64_t done;
/* Write an ASYNC_FINALIZER_TRIGGER packet */
- _RPY_REVDB_LOCK();
- rpy_reverse_db_flush();
- assert(current_packet_size() == 0);
-
- *(int16_t *)p = ASYNC_FINALIZER_TRIGGER;
- memcpy(rpy_revdb.buf_p, &rpy_revdb.stop_point_seen, sizeof(uint64_t));
- rpy_revdb.buf_p += sizeof(uint64_t);
- flush_buffer();
- _RPY_REVDB_UNLOCK();
+ emit_async_block(ASYNC_FINALIZER_TRIGGER, rpy_revdb.stop_point_seen);
/* Invoke all Boehm finalizers. For new-style finalizers, this
will only cause them to move to the queues, where
@@ -365,6 +374,26 @@
}
RPY_EXTERN
+void rpy_reverse_db_thread_switch(void)
+{
+ /* called at the end of _RPyGilAcquire(), when there was
+ potentially a thread switch. If there actually was, emit an
+ ASYNC_THREAD_SWITCH block. */
+ pthread_t tself;
+ assert(!RPY_RDB_REPLAY);
+
+ tself = pthread_self();
+ if (!current_logged_thread_seen) {
+ current_logged_thread = tself;
+ current_logged_thread_seen = 1;
+ }
+ else if (!pthread_equal(tself, current_logged_thread)) {
+ emit_async_block(ASYNC_THREAD_SWITCH, (uint64_t)tself);
+ current_logged_thread = tself;
+ }
+}
+
+RPY_EXTERN
void rpy_reverse_db_call_destructor(void *obj)
{
/* old-style finalizers. Should occur only from the
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -137,6 +137,11 @@
rpy_reverse_db_invoke_callback(_re); \
}
+#define RPY_REVDB_CALL_GILCTRL(call_code) \
+ if (!RPY_RDB_REPLAY) { \
+ call_code \
+ }
+
#define RPY_REVDB_CALLBACKLOC(locnum) \
rpy_reverse_db_callback_loc(locnum)
diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py
--- a/rpython/translator/revdb/test/test_basic.py
+++ b/rpython/translator/revdb/test/test_basic.py
@@ -86,13 +86,13 @@
return self.cur == len(self.buffer)
def write_call(self, expected_string):
- x = self.next() # raw_malloc: the pointer we got
- self.same_thread()
+ x = self.next() # raw_malloc: the pointer we got
+ self.same_stack() # write
x = self.next(); assert x == len(expected_string)
- self.same_thread()
+ self.same_stack() # errno
x = self.next('i'); assert x == 0 # errno
- def same_thread(self):
+ def same_stack(self):
x = self.next('c'); assert x == '\xFC'
diff --git a/rpython/translator/revdb/test/test_callback.py b/rpython/translator/revdb/test/test_callback.py
--- a/rpython/translator/revdb/test/test_callback.py
+++ b/rpython/translator/revdb/test/test_callback.py
@@ -63,14 +63,14 @@
self.compile(main, backendopt=False)
out = self.run('Xx')
rdb = self.fetch_rdb([self.exename, 'Xx'])
- rdb.same_thread() # callmesimple()
+ rdb.same_stack() # callmesimple()
x = rdb.next('i'); assert x == 55555
rdb.write_call('55555\n')
b = rdb.next('!h'); assert 300 <= b < 310 # -> callback
x = rdb.next('i'); assert x == 40 # arg n
x = rdb.next('!h'); assert x == b # -> callback
x = rdb.next('i'); assert x == 3 # arg n
- rdb.same_thread() # <- return in main thread
+ rdb.same_stack() # <- return in main thread
x = rdb.next('i'); assert x == 4000 * 300 # return from callme()
rdb.write_call('%s\n' % (4000 * 300,))
x = rdb.next('q'); assert x == 0 # number of stop points
@@ -87,7 +87,7 @@
x = rdb.next('!h'); assert x == b # -> callback again
x = rdb.next('i'); assert x == 3 # arg n
rdb.write_call('3\n')
- rdb.same_thread() # -> return in main thread
+ rdb.same_stack() # -> return in main thread
x = rdb.next('i'); assert x == 120 # <- return from callme()
rdb.write_call('120\n')
x = rdb.next('q'); assert x == 2 # number of stop points
diff --git a/rpython/translator/revdb/test/test_weak.py b/rpython/translator/revdb/test/test_weak.py
--- a/rpython/translator/revdb/test/test_weak.py
+++ b/rpython/translator/revdb/test/test_weak.py
@@ -203,7 +203,7 @@
assert time == i + 1
y = intmask(rdb.next('q')); assert y == -1
triggered = True
- rdb.same_thread()
+ rdb.same_stack()
j = rdb.next()
assert j == i + 1000000 * triggered
if triggered:
@@ -215,7 +215,7 @@
assert uid > 0 and uid not in uid_seen
uid_seen.add(uid)
lst.append(uid)
- rdb.same_thread()
+ rdb.same_stack()
totals.append((lst, intmask(rdb.next())))
x = rdb.next('q'); assert x == 3000 # number of stop points
#
@@ -245,13 +245,13 @@
assert x != -1
assert x not in seen_uids
seen_uids.add(x)
- rdb.same_thread()
+ rdb.same_stack()
y = intmask(rdb.next())
assert y == -7 # from the __del__
x = intmask(rdb.next())
if x == -1:
break
- rdb.same_thread()
+ rdb.same_stack()
x = rdb.next()
assert x == len(seen_uids)
assert len(seen_uids) == int(out)
From pypy.commits at gmail.com Tue Aug 9 12:55:01 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 09:55:01 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Add typechecking to
type.__dict__ descriptor
Message-ID: <57aa0ae5.2916c20a.891ef.6245@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86119:24dfaf3c5ca1
Date: 2016-08-09 17:54 +0100
http://bitbucket.org/pypy/pypy/changeset/24dfaf3c5ca1/
Log: Add typechecking to type.__dict__ descriptor
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -911,6 +911,7 @@
abstractinst.p_recursive_isinstance_type_w(space, w_inst, w_obj))
def type_get_dict(space, w_cls):
+ w_cls = _check(space, w_cls)
from pypy.objspace.std.dictproxyobject import W_DictProxyObject
w_dict = w_cls.getdict(space)
if w_dict is None:
From pypy.commits at gmail.com Tue Aug 9 13:12:20 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 10:12:20 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: still in-progress: write the
ASYNC_THREAD_SWITCH blocks just
Message-ID: <57aa0ef4.85c11c0a.6406c.759b@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86120:46485b2edf2b
Date: 2016-08-09 19:11 +0200
http://bitbucket.org/pypy/pypy/changeset/46485b2edf2b/
Log: still in-progress: write the ASYNC_THREAD_SWITCH blocks just before
we write other things to the log
diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h
--- a/rpython/translator/c/src/thread.h
+++ b/rpython/translator/c/src/thread.h
@@ -26,10 +26,6 @@
#endif /* !_WIN32 */
-#ifdef RPY_REVERSE_DEBUGGER
-RPY_EXTERN void rpy_reverse_db_thread_switch(void);
-#endif
-
RPY_EXTERN void RPyGilAllocate(void);
RPY_EXTERN long RPyGilYieldThread(void);
RPY_EXTERN void RPyGilAcquireSlowPath(long);
@@ -49,9 +45,6 @@
long old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1);
if (old_fastgil != 0)
RPyGilAcquireSlowPath(old_fastgil);
-#ifdef RPY_REVERSE_DEBUGGER
- rpy_reverse_db_thread_switch();
-#endif
}
static inline void _RPyGilRelease(void) {
assert(RPY_FASTGIL_LOCKED(rpy_fastgil));
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -56,8 +56,8 @@
static char rpy_rev_buffer[16384]; /* max. 32768 */
int rpy_rev_fileno = -1;
static char flag_io_disabled = FID_REGULAR_MODE;
-static pthread_t current_logged_thread;
-static bool_t current_logged_thread_seen;
+__thread bool_t rpy_active_thread;
+static bool_t *rpy_active_thread_ptr;
static void setup_record_mode(int argc, char *argv[]);
@@ -254,6 +254,9 @@
rpy_revdb.buf_limit = rpy_rev_buffer + sizeof(rpy_rev_buffer) - 32;
rpy_revdb.unique_id_seen = 1;
+ rpy_active_thread = 1;
+ rpy_active_thread_ptr = &rpy_active_thread;
+
pthread_atfork(NULL, NULL, close_revdb_fileno_in_fork_child);
}
@@ -292,18 +295,6 @@
}
}
-RPY_EXTERN
-void rpy_reverse_db_lock_acquire(void)
-{
- while (1) {
- if (rpy_revdb.lock == 0) {
- if (pypy_lock_test_and_set(&rpy_revdb.lock, 1) == 0)
- break; /* done */
- }
- sched_yield();
- }
-}
-
void boehm_gc_finalizer_notifier(void)
{
/* This is called by Boehm when there are pending finalizers.
@@ -339,6 +330,24 @@
_RPY_REVDB_UNLOCK();
}
+RPY_EXTERN
+void rpy_reverse_db_lock_acquire(void)
+{
+ assert(!RPY_RDB_REPLAY);
+ while (1) {
+ if (rpy_revdb.lock == 0) {
+ if (pypy_lock_test_and_set(&rpy_revdb.lock, 1) == 0)
+ break; /* done */
+ }
+ sched_yield();
+ }
+ /* we have acquired the lock here */
+ *rpy_active_thread_ptr = 0;
+ rpy_active_thread = 1;
+ rpy_active_thread_ptr = &rpy_active_thread;
+ emit_async_block(ASYNC_THREAD_SWITCH, (uint64_t)pthread_self());
+}
+
static void record_stop_point(void)
{
/* ===== FINALIZERS =====
@@ -374,26 +383,6 @@
}
RPY_EXTERN
-void rpy_reverse_db_thread_switch(void)
-{
- /* called at the end of _RPyGilAcquire(), when there was
- potentially a thread switch. If there actually was, emit an
- ASYNC_THREAD_SWITCH block. */
- pthread_t tself;
- assert(!RPY_RDB_REPLAY);
-
- tself = pthread_self();
- if (!current_logged_thread_seen) {
- current_logged_thread = tself;
- current_logged_thread_seen = 1;
- }
- else if (!pthread_equal(tself, current_logged_thread)) {
- emit_async_block(ASYNC_THREAD_SWITCH, (uint64_t)tself);
- current_logged_thread = tself;
- }
-}
-
-RPY_EXTERN
void rpy_reverse_db_call_destructor(void *obj)
{
/* old-style finalizers. Should occur only from the
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -23,6 +23,7 @@
RPY_EXTERN rpy_revdb_t rpy_revdb;
RPY_EXTERN int rpy_rev_fileno;
+RPY_EXTERN __thread bool_t rpy_active_thread;
/* ------------------------------------------------------------ */
@@ -64,7 +65,8 @@
single-threaded during replaying: the lock is only useful during
recording. */
#define _RPY_REVDB_LOCK() \
- if (pypy_lock_test_and_set(&rpy_revdb.lock, 1) != 0) \
+ if (!rpy_active_thread || \
+ pypy_lock_test_and_set(&rpy_revdb.lock, 1) != 0) \
rpy_reverse_db_lock_acquire();
#define _RPY_REVDB_UNLOCK() \
From pypy.commits at gmail.com Tue Aug 9 13:14:44 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 10:14:44 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Implement methods: iterkeys(),
itervalues(), iteritems().
Message-ID: <57aa0f84.ca11c30a.76936.64ff@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86121:d7ad5289b8ff
Date: 2016-08-09 18:14 +0100
http://bitbucket.org/pypy/pypy/changeset/d7ad5289b8ff/
Log: Implement methods: iterkeys(), itervalues(), iteritems().
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -48,12 +48,21 @@
def keys_w(self, space):
return space.call_method(self.w_mapping, "keys")
+ def descr_iterkeys(self, space):
+ return space.call_method(self.w_mapping, "iterkeys")
+
def values_w(self, space):
return space.call_method(self.w_mapping, "values")
+ def descr_itervalues(self, space):
+ return space.call_method(self.w_mapping, "itervalues")
+
def items_w(self, space):
return space.call_method(self.w_mapping, "items")
+ def descr_iteritems(self, space):
+ return space.call_method(self.w_mapping, "iteritems")
+
def copy_w(self, space):
return space.call_method(self.w_mapping, "copy")
@@ -82,8 +91,11 @@
__repr__=interp2app(W_DictProxyObject.descr_repr),
get=interp2app(W_DictProxyObject.get_w),
keys=interp2app(W_DictProxyObject.keys_w),
+ iterkeys=interp2app(W_DictProxyObject.descr_iterkeys),
values=interp2app(W_DictProxyObject.values_w),
+ itervalues=interp2app(W_DictProxyObject.descr_itervalues),
items=interp2app(W_DictProxyObject.items_w),
+ iteritems=interp2app(W_DictProxyObject.descr_iteritems),
copy=interp2app(W_DictProxyObject.copy_w),
**cmp_methods
)
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -72,6 +72,9 @@
assert 'z' not in proxy
assert repr(proxy) == 'dict_proxy(%r)' % mapping
assert proxy.keys() == mapping.keys()
+ assert list(proxy.iterkeys()) == proxy.keys()
+ assert list(proxy.itervalues()) == proxy.values()
+ assert list(proxy.iteritems()) == proxy.items()
raises(TypeError, "proxy['a'] = 4")
raises(TypeError, "del proxy['a']")
raises(AttributeError, "proxy.clear()")
From pypy.commits at gmail.com Tue Aug 9 16:42:07 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 13:42:07 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: recording threads
Message-ID: <57aa401f.c75dc20a.63be0.0d25@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86122:738711fb525a
Date: 2016-08-09 22:41 +0200
http://bitbucket.org/pypy/pypy/changeset/738711fb525a/
Log: recording threads
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -254,7 +254,7 @@
rpy_revdb.buf_limit = rpy_rev_buffer + sizeof(rpy_rev_buffer) - 32;
rpy_revdb.unique_id_seen = 1;
- rpy_active_thread = 1;
+ rpy_active_thread = 0; /* write an ASYNC_THREAD_SWITCH first in the log */
rpy_active_thread_ptr = &rpy_active_thread;
pthread_atfork(NULL, NULL, close_revdb_fileno_in_fork_child);
@@ -317,9 +317,10 @@
static void emit_async_block(int async_code, uint64_t content)
{
+ /* must be called with the lock held */
char *p = rpy_rev_buffer;
+ assert(rpy_revdb.lock);
- _RPY_REVDB_LOCK();
rpy_reverse_db_flush();
assert(current_packet_size() == 0);
@@ -327,12 +328,12 @@
memcpy(rpy_revdb.buf_p, &content, sizeof(uint64_t));
rpy_revdb.buf_p += sizeof(uint64_t);
flush_buffer();
- _RPY_REVDB_UNLOCK();
}
RPY_EXTERN
void rpy_reverse_db_lock_acquire(void)
{
+ uint64_t pself;
assert(!RPY_RDB_REPLAY);
while (1) {
if (rpy_revdb.lock == 0) {
@@ -345,7 +346,9 @@
*rpy_active_thread_ptr = 0;
rpy_active_thread = 1;
rpy_active_thread_ptr = &rpy_active_thread;
- emit_async_block(ASYNC_THREAD_SWITCH, (uint64_t)pthread_self());
+ pself = (uint64_t)pthread_self();
+ emit_async_block(ASYNC_THREAD_SWITCH, pself);
+ _RPY_REVDB_PRINT("[THRD]", pself);
}
static void record_stop_point(void)
@@ -362,7 +365,9 @@
int64_t done;
/* Write an ASYNC_FINALIZER_TRIGGER packet */
+ _RPY_REVDB_LOCK();
emit_async_block(ASYNC_FINALIZER_TRIGGER, rpy_revdb.stop_point_seen);
+ _RPY_REVDB_UNLOCK();
/* Invoke all Boehm finalizers. For new-style finalizers, this
will only cause them to move to the queues, where
@@ -854,6 +859,18 @@
/* rpy_revdb.buf_limit is not set */
}
+static uint64_t fetch_async_block(void)
+{
+ ssize_t full_packet_size = sizeof(int16_t) + sizeof(int64_t);
+ ssize_t keep = rpy_revdb.buf_readend - rpy_revdb.buf_p;
+ uint64_t result;
+ if (keep < full_packet_size)
+ fetch_more(keep, full_packet_size);
+ memcpy(&result, rpy_revdb.buf_p + sizeof(int16_t), sizeof(int64_t));
+ rpy_revdb.buf_p += full_packet_size;
+ return result;
+}
+
RPY_EXTERN
void rpy_reverse_db_fetch(const char *file, int line)
{
@@ -871,6 +888,7 @@
exit(1);
}
+ read_next_packet:
keep = rpy_revdb.buf_readend - rpy_revdb.buf_p;
assert(keep >= 0);
@@ -892,11 +910,7 @@
"ASYNC_FINALIZER_TRIGGER\n");
exit(1);
}
- full_packet_size = sizeof(int16_t) + sizeof(int64_t);
- if (keep < full_packet_size)
- fetch_more(keep, full_packet_size);
- memcpy(&bp, rpy_revdb.buf_p + sizeof(int16_t), sizeof(int64_t));
- rpy_revdb.buf_p += full_packet_size;
+ bp = fetch_async_block();
if (bp <= rpy_revdb.stop_point_seen) {
fprintf(stderr, "invalid finalizer break point\n");
exit(1);
@@ -908,6 +922,10 @@
rpy_revdb.buf_limit = rpy_revdb.buf_p;
return;
+ case ASYNC_THREAD_SWITCH:
+ fetch_async_block();
+ goto read_next_packet;
+
default:
fprintf(stderr, "bad packet header %d\n", (int)header);
exit(1);
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -35,8 +35,8 @@
# define _RPY_REVDB_PRINT(mode, _e) \
if (rpy_rev_fileno >= 0) { \
fprintf(stderr, \
- "%s:%d: %0*llx\n", \
- __FILE__, __LINE__, 2 * sizeof(_e), \
+ "%s %s:%d: %0*llx\n", \
+ mode, __FILE__, __LINE__, 2 * sizeof(_e), \
((unsigned long long)_e) & ((2ULL << (8*sizeof(_e)-1)) - 1)); \
}
#endif
@@ -47,7 +47,7 @@
if (rpy_rev_fileno >= 0) { \
seeing_uid(uid); \
fprintf(stderr, \
- "%s:%d: obj %llu\n", \
+ "[nobj] %s:%d: obj %llu\n", \
__FILE__, __LINE__, (unsigned long long) uid); \
}
#endif
@@ -76,7 +76,7 @@
#define _RPY_REVDB_EMIT_RECORD_L(decl_e, variable) \
{ \
decl_e = variable; \
- _RPY_REVDB_PRINT("write", _e); \
+ _RPY_REVDB_PRINT("[ wr ]", _e); \
memcpy(rpy_revdb.buf_p, &_e, sizeof(_e)); \
if ((rpy_revdb.buf_p += sizeof(_e)) > rpy_revdb.buf_limit) \
rpy_reverse_db_flush(); \
@@ -89,7 +89,7 @@
char *_end1 = _src + sizeof(_e); \
memcpy(&_e, _src, sizeof(_e)); \
rpy_revdb.buf_p = _end1; \
- _RPY_REVDB_PRINT("read", _e); \
+ _RPY_REVDB_PRINT("[read]", _e); \
if (_end1 >= rpy_revdb.buf_limit) \
rpy_reverse_db_fetch(__FILE__, __LINE__); \
variable = _e; \
diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py
--- a/rpython/translator/revdb/test/test_basic.py
+++ b/rpython/translator/revdb/test/test_basic.py
@@ -13,6 +13,9 @@
from rpython.translator.revdb.process import ReplayProcess
+ASYNC_THREAD_SWITCH = 0xff54 - 2**16
+
+
class RDB(object):
def __init__(self, filename, expected_argv):
with open(filename, 'rb') as f:
@@ -30,6 +33,7 @@
self.argc = self.read1('i')
self.argv = self.read1('P')
self.current_packet_end = self.cur
+ self.main_thread_id = self.switch_thread()
self.read_check_argv(expected_argv)
def read1(self, mode):
@@ -95,15 +99,22 @@
def same_stack(self):
x = self.next('c'); assert x == '\xFC'
+ def switch_thread(self, expected=None):
+ th, = self.special_packet(ASYNC_THREAD_SWITCH, 'q')
+ if expected is not None:
+ assert th == expected
+ return th
+
def compile(self, entry_point, backendopt=True,
- withsmallfuncsets=None, shared=False):
+ withsmallfuncsets=None, shared=False, thread=False):
t = Translation(entry_point, None, gc="boehm")
self.t = t
t.set_backend_extra_options(c_debug_defines=True)
t.config.translation.reverse_debugger = True
t.config.translation.lldebug0 = True
t.config.translation.shared = shared
+ t.config.translation.thread = thread
if withsmallfuncsets is not None:
t.config.translation.withsmallfuncsets = withsmallfuncsets
if not backendopt:
From pypy.commits at gmail.com Tue Aug 9 16:45:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 13:45:42 -0700 (PDT)
Subject: [pypy-commit] pypy.org extradoc: update the values
Message-ID: <57aa40f6.274fc20a.a9df1.b17b@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r776:8ade9f34749c
Date: 2016-08-09 22:45 +0200
http://bitbucket.org/pypy/pypy.org/changeset/8ade9f34749c/
Log: update the values
diff --git a/don1.html b/don1.html
--- a/don1.html
+++ b/don1.html
@@ -15,7 +15,7 @@
- $64800 of $105000 (61.7%)
+ $64836 of $105000 (61.7%)
@@ -23,7 +23,7 @@
This donation goes towards supporting Python 3 in PyPy.
Current status:
- we have $5283 left
+ we have $5314 left
in the account. Read proposal
diff --git a/don4.html b/don4.html
--- a/don4.html
+++ b/don4.html
@@ -17,7 +17,7 @@
2nd call:
- $30845 of $80000 (38.6%)
+ $30855 of $80000 (38.6%)
@@ -25,7 +25,7 @@
This donation goes towards supporting the Transactional Memory in PyPy.
Current status:
- we have $23502 left
+ we have $23511 left
in the account. Read proposal (2nd call)
From pypy.commits at gmail.com Tue Aug 9 19:32:16 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 16:32:16 -0700 (PDT)
Subject: [pypy-commit] pypy py2-mappingproxy: Close branch py2-mappingproxy
Message-ID: <57aa6800.94071c0a.23329.e037@mx.google.com>
Author: Ronan Lamy
Branch: py2-mappingproxy
Changeset: r86123:812334df02d1
Date: 2016-08-10 00:31 +0100
http://bitbucket.org/pypy/pypy/changeset/812334df02d1/
Log: Close branch py2-mappingproxy
From pypy.commits at gmail.com Tue Aug 9 19:32:54 2016
From: pypy.commits at gmail.com (rlamy)
Date: Tue, 09 Aug 2016 16:32:54 -0700 (PDT)
Subject: [pypy-commit] pypy default: Merged in py2-mappingproxy (pull
request #470)
Message-ID: <57aa6826.94a51c0a.3a1e7.e582@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r86124:1900672c0757
Date: 2016-08-10 00:31 +0100
http://bitbucket.org/pypy/pypy/changeset/1900672c0757/
Log: Merged in py2-mappingproxy (pull request #470)
Fix the dictproxy type to behave as in CPython. App-level
cls.__dict__ and C-level cls->tp_dict now return different objects
with the former being an opaque (at app-level) wrapper around the
latter.
diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py
--- a/pypy/module/cppyy/pythonify.py
+++ b/pypy/module/cppyy/pythonify.py
@@ -175,7 +175,7 @@
"__new__" : make_new(class_name),
}
pycppclass = metacpp(class_name, _drop_cycles(bases), d)
-
+
# cache result early so that the class methods can find the class itself
setattr(scope, final_class_name, pycppclass)
@@ -192,13 +192,10 @@
for dm_name in cppclass.get_datamember_names():
cppdm = cppclass.get_datamember(dm_name)
- # here, setattr() can not be used, because a data member can shadow one in
- # its base class, resulting in the __set__() of its base class being called
- # by setattr(); so, store directly on the dictionary
- pycppclass.__dict__[dm_name] = cppdm
+ setattr(pycppclass, dm_name, cppdm)
import cppyy
if cppyy._is_static(cppdm): # TODO: make this a method of cppdm
- metacpp.__dict__[dm_name] = cppdm
+ setattr(metacpp, dm_name, cppdm)
# the call to register will add back-end specific pythonizations and thus
# needs to run first, so that the generic pythonizations can use them
@@ -413,7 +410,7 @@
lib = cppyy._load_dictionary(name)
_loaded_dictionaries[name] = lib
return lib
-
+
def _init_pythonify():
# cppyy should not be loaded at the module level, as that will trigger a
# call to space.getbuiltinmodule(), which will cause cppyy to be loaded
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -282,11 +282,41 @@
args->ob_type->tp_dict, "copy");
Py_INCREF(method);
return method;
+ '''),
+ ("get_type_dict", "METH_O",
'''
- )
+ PyObject* value = args->ob_type->tp_dict;
+ if (value == NULL) value = Py_None;
+ Py_INCREF(value);
+ return value;
+ '''),
])
obj = foo.new()
assert module.read_tp_dict(obj) == foo.fooType.copy
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
+ class A(object):
+ pass
+ obj = A()
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
+ d = module.get_type_dict(1)
+ assert type(d) is dict
+ try:
+ d["_some_attribute"] = 1
+ except TypeError: # on PyPy, int.__dict__ is really immutable
+ pass
+ else:
+ assert int._some_attribute == 1
+ del d["_some_attribute"]
def test_custom_allocation(self):
foo = self.import_module("foo")
@@ -355,6 +385,21 @@
api.Py_DecRef(ref)
+ def test_type_dict(self, space, api):
+ w_class = space.appexec([], """():
+ class A(object):
+ pass
+ return A
+ """)
+ ref = make_ref(space, w_class)
+
+ py_type = rffi.cast(PyTypeObjectPtr, ref)
+ w_dict = from_ref(space, py_type.c_tp_dict)
+ w_name = space.wrap('a')
+ space.setitem(w_dict, w_name, space.wrap(1))
+ assert space.int_w(space.getattr(w_class, w_name)) == 1
+ space.delitem(w_dict, w_name)
+
def test_multiple_inheritance(self, space, api):
w_class = space.appexec([], """():
class A(object):
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -272,12 +272,12 @@
if len(slot_names) == 1:
if not getattr(pto, slot_names[0]):
setattr(pto, slot_names[0], slot_func_helper)
- elif (w_type.getname(space) in ('list', 'tuple') and
+ elif (w_type.getname(space) in ('list', 'tuple') and
slot_names[0] == 'c_tp_as_number'):
                # XXX hack - how can we generalize this? The problem is method
# names like __mul__ map to more than one slot, and we have no
# convenient way to indicate which slots CPython have filled
- #
+ #
# We need at least this special case since Numpy checks that
# (list, tuple) do __not__ fill tp_as_number
pass
@@ -860,8 +860,8 @@
if w_obj.is_cpytype():
Py_DecRef(space, pto.c_tp_dict)
- w_dict = w_obj.getdict(space)
- pto.c_tp_dict = make_ref(space, w_dict)
+ w_dict = w_obj.getdict(space)
+ pto.c_tp_dict = make_ref(space, w_dict)
@cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL)
def PyType_IsSubtype(space, a, b):
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/classdict.py
copy from pypy/objspace/std/dictproxyobject.py
copy to pypy/objspace/std/classdict.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/classdict.py
@@ -7,7 +7,7 @@
from pypy.objspace.std.typeobject import unwrap_cell
-class DictProxyStrategy(DictStrategy):
+class ClassDictStrategy(DictStrategy):
erase, unerase = rerased.new_erasing_pair("dictproxy")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
@@ -85,7 +85,8 @@
return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys())
def values(self, w_dict):
- return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()]
+ return [unwrap_cell(self.space, w_value) for w_value in
+ self.unerase(w_dict.dstorage).dict_w.itervalues()]
def items(self, w_dict):
space = self.space
@@ -103,13 +104,17 @@
def getiterkeys(self, w_dict):
return self.unerase(w_dict.dstorage).dict_w.iterkeys()
+
def getitervalues(self, w_dict):
return self.unerase(w_dict.dstorage).dict_w.itervalues()
+
def getiteritems_with_hash(self, w_dict):
return iteritems_with_hash(self.unerase(w_dict.dstorage).dict_w)
+
def wrapkey(space, key):
return space.wrap(key)
+
def wrapvalue(space, value):
return unwrap_cell(space, value)
-create_iterator_classes(DictProxyStrategy)
+create_iterator_classes(ClassDictStrategy)
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -1,115 +1,101 @@
-from rpython.rlib import rerased
-from rpython.rlib.objectmodel import iteritems_with_hash
+"""
+Read-only proxy for mappings.
-from pypy.interpreter.error import OperationError, oefmt
-from pypy.objspace.std.dictmultiobject import (
- DictStrategy, create_iterator_classes)
-from pypy.objspace.std.typeobject import unwrap_cell
+Its main use is as the return type of cls.__dict__.
+"""
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.error import oefmt
+from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
+from pypy.interpreter.typedef import TypeDef, interp2app
-class DictProxyStrategy(DictStrategy):
- erase, unerase = rerased.new_erasing_pair("dictproxy")
- erase = staticmethod(erase)
- unerase = staticmethod(unerase)
+class W_DictProxyObject(W_Root):
+ "Read-only proxy for mappings."
- def __init__(self, space):
- DictStrategy.__init__(self, space)
+ def __init__(self, w_mapping):
+ self.w_mapping = w_mapping
- def getitem(self, w_dict, w_key):
- space = self.space
- w_lookup_type = space.type(w_key)
- if (space.is_w(w_lookup_type, space.w_str) or # Most common path first
- space.abstract_issubclass_w(w_lookup_type, space.w_str)):
- return self.getitem_str(w_dict, space.str_w(w_key))
- elif space.abstract_issubclass_w(w_lookup_type, space.w_unicode):
- try:
- w_key = space.str(w_key)
- except OperationError as e:
- if not e.match(space, space.w_UnicodeEncodeError):
- raise
- # non-ascii unicode is never equal to a byte string
- return None
- return self.getitem_str(w_dict, space.str_w(w_key))
- else:
- return None
+ @staticmethod
+ def descr_new(space, w_type, w_mapping):
+ raise oefmt(space.w_TypeError, "Cannot create 'dictproxy' instances")
- def getitem_str(self, w_dict, key):
- return self.unerase(w_dict.dstorage).getdictvalue(self.space, key)
+ def descr_init(self, space, __args__):
+ pass
- def setitem(self, w_dict, w_key, w_value):
- space = self.space
- if space.is_w(space.type(w_key), space.w_str):
- self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
- else:
- raise oefmt(space.w_TypeError,
- "cannot add non-string keys to dict of a type")
+ def descr_len(self, space):
+ return space.len(self.w_mapping)
- def setitem_str(self, w_dict, key, w_value):
- w_type = self.unerase(w_dict.dstorage)
- try:
- w_type.setdictvalue(self.space, key, w_value)
- except OperationError as e:
- if not e.match(self.space, self.space.w_TypeError):
- raise
- if not w_type.is_cpytype():
- raise
- # Allow cpyext to write to type->tp_dict even in the case
- # of a builtin type.
- # Like CPython, we assume that this is only done early
- # after the type is created, and we don't invalidate any
-            # cache. User code should call PyType_Modified().
- w_type.dict_w[key] = w_value
+ def descr_getitem(self, space, w_key):
+ return space.getitem(self.w_mapping, w_key)
- def setdefault(self, w_dict, w_key, w_default):
- w_result = self.getitem(w_dict, w_key)
- if w_result is not None:
- return w_result
- self.setitem(w_dict, w_key, w_default)
- return w_default
+ def descr_contains(self, space, w_key):
+ return space.contains(self.w_mapping, w_key)
- def delitem(self, w_dict, w_key):
- space = self.space
- w_key_type = space.type(w_key)
- if space.is_w(w_key_type, space.w_str):
- key = self.space.str_w(w_key)
- if not self.unerase(w_dict.dstorage).deldictvalue(space, key):
- raise KeyError
- else:
- raise KeyError
+ def descr_iter(self, space):
+ return space.iter(self.w_mapping)
- def length(self, w_dict):
- return len(self.unerase(w_dict.dstorage).dict_w)
+ def descr_str(self, space):
+ return space.str(self.w_mapping)
- def w_keys(self, w_dict):
- space = self.space
- return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys())
+ def descr_repr(self, space):
+ return space.wrap("dict_proxy(%s)" %
+ (space.str_w(space.repr(self.w_mapping)),))
- def values(self, w_dict):
- return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()]
+ @unwrap_spec(w_default=WrappedDefault(None))
+ def get_w(self, space, w_key, w_default):
+ return space.call_method(self.w_mapping, "get", w_key, w_default)
- def items(self, w_dict):
- space = self.space
- return [space.newtuple([space.wrap(key), unwrap_cell(self.space, w_value)])
- for (key, w_value) in self.unerase(w_dict.dstorage).dict_w.iteritems()]
+ def keys_w(self, space):
+ return space.call_method(self.w_mapping, "keys")
- def clear(self, w_dict):
- space = self.space
- w_type = self.unerase(w_dict.dstorage)
- if not w_type.is_heaptype():
- raise oefmt(space.w_TypeError,
- "can't clear dictionary of type '%N'", w_type)
- w_type.dict_w.clear()
- w_type.mutated(None)
+ def descr_iterkeys(self, space):
+ return space.call_method(self.w_mapping, "iterkeys")
- def getiterkeys(self, w_dict):
- return self.unerase(w_dict.dstorage).dict_w.iterkeys()
- def getitervalues(self, w_dict):
- return self.unerase(w_dict.dstorage).dict_w.itervalues()
- def getiteritems_with_hash(self, w_dict):
- return iteritems_with_hash(self.unerase(w_dict.dstorage).dict_w)
- def wrapkey(space, key):
- return space.wrap(key)
- def wrapvalue(space, value):
- return unwrap_cell(space, value)
+ def values_w(self, space):
+ return space.call_method(self.w_mapping, "values")
-create_iterator_classes(DictProxyStrategy)
+ def descr_itervalues(self, space):
+ return space.call_method(self.w_mapping, "itervalues")
+
+ def items_w(self, space):
+ return space.call_method(self.w_mapping, "items")
+
+ def descr_iteritems(self, space):
+ return space.call_method(self.w_mapping, "iteritems")
+
+ def copy_w(self, space):
+ return space.call_method(self.w_mapping, "copy")
+
+cmp_methods = {}
+def make_cmp_method(op):
+ def descr_op(self, space, w_other):
+ return getattr(space, op)(self.w_mapping, w_other)
+ descr_name = 'descr_' + op
+ descr_op.__name__ = descr_name
+ setattr(W_DictProxyObject, descr_name, descr_op)
+ cmp_methods['__%s__' % op] = interp2app(getattr(W_DictProxyObject, descr_name))
+
+for op in ['eq', 'ne', 'gt', 'ge', 'lt', 'le']:
+ make_cmp_method(op)
+
+
+W_DictProxyObject.typedef = TypeDef(
+ 'dictproxy',
+ __new__=interp2app(W_DictProxyObject.descr_new),
+ __init__=interp2app(W_DictProxyObject.descr_init),
+ __len__=interp2app(W_DictProxyObject.descr_len),
+ __getitem__=interp2app(W_DictProxyObject.descr_getitem),
+ __contains__=interp2app(W_DictProxyObject.descr_contains),
+ __iter__=interp2app(W_DictProxyObject.descr_iter),
+ __str__=interp2app(W_DictProxyObject.descr_str),
+ __repr__=interp2app(W_DictProxyObject.descr_repr),
+ get=interp2app(W_DictProxyObject.get_w),
+ keys=interp2app(W_DictProxyObject.keys_w),
+ iterkeys=interp2app(W_DictProxyObject.descr_iterkeys),
+ values=interp2app(W_DictProxyObject.values_w),
+ itervalues=interp2app(W_DictProxyObject.descr_itervalues),
+ items=interp2app(W_DictProxyObject.items_w),
+ iteritems=interp2app(W_DictProxyObject.descr_iteritems),
+ copy=interp2app(W_DictProxyObject.copy_w),
+ **cmp_methods
+)
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -9,37 +9,19 @@
assert 'a' in NotEmpty.__dict__
assert 'a' in NotEmpty.__dict__.keys()
assert 'b' not in NotEmpty.__dict__
- NotEmpty.__dict__['b'] = 4
- assert NotEmpty.b == 4
- del NotEmpty.__dict__['b']
assert NotEmpty.__dict__.get("b") is None
+ raises(TypeError, "NotEmpty.__dict__['b'] = 4")
raises(TypeError, 'NotEmpty.__dict__[15] = "y"')
- raises(KeyError, 'del NotEmpty.__dict__[15]')
+ raises(TypeError, 'del NotEmpty.__dict__[15]')
- assert NotEmpty.__dict__.setdefault("string", 1) == 1
- assert NotEmpty.__dict__.setdefault("string", 2) == 1
- assert NotEmpty.string == 1
- raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)')
-
- def test_dictproxy_popitem(self):
- class A(object):
- a = 42
- seen = 0
- try:
- while True:
- key, value = A.__dict__.popitem()
- if key == 'a':
- assert value == 42
- seen += 1
- except KeyError:
- pass
- assert seen == 1
+ raises(AttributeError, 'NotEmpty.__dict__.setdefault')
def test_dictproxy_getitem(self):
class NotEmpty(object):
a = 1
assert 'a' in NotEmpty.__dict__
- class substr(str): pass
+ class substr(str):
+ pass
assert substr('a') in NotEmpty.__dict__
assert u'a' in NotEmpty.__dict__
assert NotEmpty.__dict__[u'a'] == 1
@@ -62,15 +44,40 @@
class a(object):
pass
s1 = repr(a.__dict__)
+ assert s1.startswith('dict_proxy({') and s1.endswith('})')
s2 = str(a.__dict__)
- assert s1 == s2
- assert s1.startswith('{') and s1.endswith('}')
+ assert s1 == 'dict_proxy(%s)' % s2
def test_immutable_dict_on_builtin_type(self):
raises(TypeError, "int.__dict__['a'] = 1")
- raises(TypeError, int.__dict__.popitem)
- raises(TypeError, int.__dict__.clear)
+ raises((AttributeError, TypeError), "int.__dict__.popitem()")
+ raises((AttributeError, TypeError), "int.__dict__.clear()")
+
+ def test_dictproxy(self):
+ dictproxy = type(int.__dict__)
+ assert dictproxy is not dict
+ assert dictproxy.__name__ == 'dictproxy'
+ raises(TypeError, dictproxy)
+
+ mapping = {'a': 1}
+ raises(TypeError, dictproxy, mapping)
+
+ class A(object):
+ a = 1
+
+ proxy = A.__dict__
+ mapping = dict(proxy)
+ assert proxy['a'] == 1
+ assert 'a' in proxy
+ assert 'z' not in proxy
+ assert repr(proxy) == 'dict_proxy(%r)' % mapping
+ assert proxy.keys() == mapping.keys()
+ assert list(proxy.iterkeys()) == proxy.keys()
+ assert list(proxy.itervalues()) == proxy.values()
+ assert list(proxy.iteritems()) == proxy.items()
+ raises(TypeError, "proxy['a'] = 4")
+ raises(TypeError, "del proxy['a']")
+ raises(AttributeError, "proxy.clear()")
class AppTestUserObjectMethodCache(AppTestUserObject):
spaceconfig = {"objspace.std.withmethodcachecounter": True}
-
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -968,7 +968,6 @@
raises(TypeError, setattr, list, 'foobar', 42)
raises(TypeError, delattr, dict, 'keys')
raises(TypeError, 'int.__dict__["a"] = 1')
- raises(TypeError, 'int.__dict__.clear()')
def test_nontype_in_mro(self):
class OldStyle:
@@ -1026,10 +1025,9 @@
pass
a = A()
+ d = A.__dict__
A.x = 1
- assert A.__dict__["x"] == 1
- A.__dict__['x'] = 5
- assert A.x == 5
+ assert d["x"] == 1
def test_we_already_got_one_1(self):
# Issue #2079: highly obscure: CPython complains if we say
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -3,8 +3,8 @@
from pypy.interpreter.baseobjspace import W_Root, SpaceCache
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import Function, StaticMethod
-from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\
- descr_get_dict, dict_descr, Member, TypeDef
+from pypy.interpreter.typedef import (
+ weakref_descr, GetSetProperty, dict_descr, Member, TypeDef)
from pypy.interpreter.astcompiler.misc import mangle
from pypy.module.__builtin__ import abstractinst
@@ -346,7 +346,7 @@
def deldictvalue(self, space, key):
if self.lazyloaders:
self._cleanup_() # force un-lazification
- if not self.is_heaptype():
+ if not (self.is_heaptype() or self.is_cpytype()):
raise oefmt(space.w_TypeError,
"can't delete attributes on type object '%N'", self)
try:
@@ -485,12 +485,12 @@
self.getdictvalue(self.space, attr)
del self.lazyloaders
- def getdict(self, space): # returning a dict-proxy!
- from pypy.objspace.std.dictproxyobject import DictProxyStrategy
+ def getdict(self, space):
+ from pypy.objspace.std.classdict import ClassDictStrategy
from pypy.objspace.std.dictmultiobject import W_DictObject
if self.lazyloaders:
self._cleanup_() # force un-lazification
- strategy = space.fromcache(DictProxyStrategy)
+ strategy = space.fromcache(ClassDictStrategy)
storage = strategy.erase(self)
return W_DictObject(space, strategy, storage)
@@ -910,13 +910,21 @@
return space.newbool(
abstractinst.p_recursive_isinstance_type_w(space, w_inst, w_obj))
+def type_get_dict(space, w_cls):
+ w_cls = _check(space, w_cls)
+ from pypy.objspace.std.dictproxyobject import W_DictProxyObject
+ w_dict = w_cls.getdict(space)
+ if w_dict is None:
+ return space.w_None
+ return W_DictProxyObject(w_dict)
+
W_TypeObject.typedef = TypeDef("type",
__new__ = gateway.interp2app(descr__new__),
__name__ = GetSetProperty(descr_get__name__, descr_set__name__),
__bases__ = GetSetProperty(descr_get__bases__, descr_set__bases__),
__base__ = GetSetProperty(descr__base),
__mro__ = GetSetProperty(descr_get__mro__),
- __dict__ = GetSetProperty(descr_get_dict),
+ __dict__=GetSetProperty(type_get_dict),
__doc__ = GetSetProperty(descr__doc),
mro = gateway.interp2app(descr_mro),
__flags__ = GetSetProperty(descr__flags),
From pypy.commits at gmail.com Wed Aug 10 00:02:09 2016
From: pypy.commits at gmail.com (mattip)
Date: Tue, 09 Aug 2016 21:02:09 -0700 (PDT)
Subject: [pypy-commit] pypy default: test, fix PySequence_Fast getslice;
remove outdated document
Message-ID: <57aaa741.c2f3c20a.a80d5.1951@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r86125:72d14a4de609
Date: 2016-08-10 07:01 +0300
http://bitbucket.org/pypy/pypy/changeset/72d14a4de609/
Log: test, fix PySequence_Fast getslice; remove outdated document
diff --git a/pypy/module/cpyext/c-api.txt b/pypy/module/cpyext/c-api.txt
deleted file mode 100644
--- a/pypy/module/cpyext/c-api.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-Reference Count
-===============
-
-XXX
-
-Borrowed References
-===================
-
-XXX
-
-PyStringObject support
-======================
-
-The problem
------------
-
-PyString_AsString() returns a (non-movable) pointer to the underlying
-buffer, whereas pypy strings are movable. C code may temporarily
-store this address and use it, as long as it owns a reference to the
-PyObject. There is no "release" function to specify that the pointer
-is not needed any more.
-
-Note that the pointer may be used to fill the initial value of
-string. This is valid only when the string was just allocated, and is
-not used elsewhere.
-
-Proposed solution
------------------
-
-Our emulation of the PyStringObject contains an additional member: a
-pointer to a char buffer; it may be NULL.
-
-- A string allocated by pypy will be converted into a PyStringObject
- with a NULL buffer. When PyString_AsString() is called, memory is
- allocated (with flavor='raw') and content is copied.
-
-- A string allocated with PyString_FromStringAndSize(NULL, size) will
- allocate a buffer with the specified size, but the reference won't
- be stored in the global map py_objects_r2w; there won't be a
- corresponding object in pypy. When from_ref() or Py_INCREF() is
- called, the pypy string is created, and added in py_objects_r2w.
- The buffer is then supposed to be immutable.
-
-- _PyString_Resize works only on not-yet-pypy'd strings, and returns a
- similar object.
-
-- PyString_Size don't need to force the object. (in this case, another
- "size" member is needed)
-
-- There could be an (expensive!) check in from_ref() that the buffer
- still corresponds to the pypy gc-managed string.
-
-PySequence_Fast support
-======================
-There are five functions for fast sequence access offered by the CPython API:
-
-PyObject* PySequence_Fast(PyObject *o, const char *m)
-
-PyObject* PySequence_Fast_GET_ITEM( PyObject *o, int i)
-
-PyObject** PySequence_Fast_ITEMS( PyObject *o)
-
-PyObject* PySequence_ITEM( PyObject *o, int i)
-
-int PySequence_Fast_GET_SIZE( PyObject *o)
-
-PyPy supports four of these, but does not support PySequence_Fast_ITEMS.
-(Various ways to support PySequence_Fast_ITEMS were considered. They all had
-two things in common: they would have taken a lot of work, and they would have
-resulted in incomplete semantics or in poor performance. We decided that a slow
-implementation of PySequence_Fast_ITEMS was not very useful.)
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -10,7 +10,7 @@
from pypy.objspace.std import tupleobject
from pypy.module.cpyext.tupleobject import PyTuple_Check, PyTuple_SetItem
-from pypy.module.cpyext.object import Py_IncRef, Py_DecRef
+from pypy.module.cpyext.pyobject import decref
from pypy.module.cpyext.dictobject import PyDict_Check
@@ -252,7 +252,7 @@
def setitem(self, w_list, index, w_obj):
storage = self.unerase(w_list.lstorage)
index = self._check_index(index, storage._length)
- Py_DecRef(w_list.space, storage._elems[index])
+ decref(w_list.space, storage._elems[index])
storage._elems[index] = make_ref(w_list.space, w_obj)
def length(self, w_list):
@@ -264,9 +264,8 @@
return storage._elems
def getslice(self, w_list, start, stop, step, length):
- #storage = self.unerase(w_list.lstorage)
- raise oefmt(w_list.space.w_NotImplementedError,
- "settting a slice of a PySequence_Fast is not supported")
+ w_list.switch_to_object_strategy()
+ return w_list.strategy.getslice(w_list, start, stop, step, length)
def getitems(self, w_list):
# called when switching list strategy, so convert storage
@@ -389,5 +388,5 @@
def __del__(self):
for i in range(self._length):
- Py_DecRef(self.space, self._elems[i])
+ decref(self.space, self._elems[i])
lltype.free(self._elems, flavor='raw')
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -78,6 +78,17 @@
assert api.PySequence_SetSlice(w_t, 1, 1, space.wrap((3,))) == 0
assert space.eq_w(w_t, space.wrap([1, 3, 5]))
+ def test_get_slice_fast(self, space, api):
+ w_t = space.wrap([1, 2, 3, 4, 5])
+ api.PySequence_Fast(w_t, "foo") # converts
+ assert space.unwrap(api.PySequence_GetSlice(w_t, 2, 4)) == [3, 4]
+ assert space.unwrap(api.PySequence_GetSlice(w_t, 1, -1)) == [2, 3, 4]
+
+ assert api.PySequence_DelSlice(w_t, 1, 4) == 0
+ assert space.eq_w(w_t, space.wrap([1, 5]))
+ assert api.PySequence_SetSlice(w_t, 1, 1, space.wrap((3,))) == 0
+ assert space.eq_w(w_t, space.wrap([1, 3, 5]))
+
def test_iter(self, space, api):
w_t = space.wrap((1, 2))
w_iter = api.PySeqIter_New(w_t)
@@ -226,18 +237,33 @@
assert space.int_w(space.len(w_l)) == 10
-class XAppTestSequenceObject(AppTestCpythonExtensionBase):
- def test_sequenceobject(self):
+class AppTestSequenceObject(AppTestCpythonExtensionBase):
+ def test_fast(self):
module = self.import_extension('foo', [
("test_fast_sequence", "METH_VARARGS",
"""
- PyObject * o = PyTuple_GetItem(args, 0);
+ int size, i;
+ PyTypeObject * common_type;
+ PyObject *foo, **objects;
+ PyObject * seq = PyTuple_GetItem(args, 0);
/* XXX assert it is a tuple */
- PyObject *foo = PySequence_Fast(o, "some string");
- PyObject ** res = PySequence_Fast_ITEMS(foo);
- /* XXX do some kind of test on res */
- /* XXX now what? who manages res's refcount? */
+ if (seq == NULL)
+ Py_RETURN_NONE;
+ foo = PySequence_Fast(seq, "some string");
+ objects = PySequence_Fast_ITEMS(foo);
+ size = PySequence_Fast_GET_SIZE(seq);
+ common_type = size > 0 ? Py_TYPE(objects[0]) : NULL;
+ for (i = 1; i < size; ++i) {
+ if (Py_TYPE(objects[i]) != common_type) {
+ common_type = NULL;
+ break;
+ }
+ }
+ Py_DECREF(foo);
+ Py_DECREF(common_type);
return PyBool_FromLong(1);
""")])
- assert module.test_fast_sequence([1, 2, 3, 4])
+ s = [1, 2, 3, 4]
+ assert module.test_fast_sequence(s[0:-1])
+ assert module.test_fast_sequence(s[::-1])
From pypy.commits at gmail.com Wed Aug 10 02:35:25 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 09 Aug 2016 23:35:25 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async-translate: close branch
Message-ID: <57aacb2d.271ac20a.e6d8c.317a@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async-translate
Changeset: r86126:2569ef7fa3cc
Date: 2016-08-10 08:34 +0200
http://bitbucket.org/pypy/pypy/changeset/2569ef7fa3cc/
Log: close branch
From pypy.commits at gmail.com Wed Aug 10 02:43:46 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 09 Aug 2016 23:43:46 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: fix locking logic
Message-ID: <57aacd22.08d11c0a.7d6c5.4372@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86127:a0e97b020056
Date: 2016-08-10 08:43 +0200
http://bitbucket.org/pypy/pypy/changeset/a0e97b020056/
Log: fix locking logic
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -331,11 +331,11 @@
}
RPY_EXTERN
-void rpy_reverse_db_lock_acquire(void)
+void rpy_reverse_db_lock_acquire(bool_t lock_contention)
{
uint64_t pself;
assert(!RPY_RDB_REPLAY);
- while (1) {
+ while (lock_contention) {
if (rpy_revdb.lock == 0) {
if (pypy_lock_test_and_set(&rpy_revdb.lock, 1) == 0)
break; /* done */
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -65,10 +65,11 @@
single-threaded during replaying: the lock is only useful during
recording. */
#define _RPY_REVDB_LOCK() \
- if (!rpy_active_thread || \
- pypy_lock_test_and_set(&rpy_revdb.lock, 1) != 0) \
- rpy_reverse_db_lock_acquire();
-
+ { \
+ bool_t _lock_contention = pypy_lock_test_and_set(&rpy_revdb.lock, 1); \
+ if (_lock_contention || !rpy_active_thread) \
+ rpy_reverse_db_lock_acquire(_lock_contention); \
+ }
#define _RPY_REVDB_UNLOCK() \
pypy_lock_release(&rpy_revdb.lock)
@@ -221,6 +222,6 @@
RPY_EXTERN void rpy_reverse_db_call_destructor(void *obj);
RPY_EXTERN void rpy_reverse_db_invoke_callback(unsigned char);
RPY_EXTERN void rpy_reverse_db_callback_loc(int);
-RPY_EXTERN void rpy_reverse_db_lock_acquire(void);
+RPY_EXTERN void rpy_reverse_db_lock_acquire(bool_t lock_contention);
/* ------------------------------------------------------------ */
From pypy.commits at gmail.com Wed Aug 10 03:26:15 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 10 Aug 2016 00:26:15 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: fixes
Message-ID: <57aad717.d8011c0a.c929d.5215@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86128:33ad82b0c795
Date: 2016-08-10 09:25 +0200
http://bitbucket.org/pypy/pypy/changeset/33ad82b0c795/
Log: fixes
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py
--- a/rpython/translator/c/genc.py
+++ b/rpython/translator/c/genc.py
@@ -203,7 +203,8 @@
# use generate_source(defines=DEBUG_DEFINES) to force the #definition
# of the macros that enable debugging assertions
DEBUG_DEFINES = {'RPY_ASSERT': 1,
- 'RPY_LL_ASSERT': 1}
+ 'RPY_LL_ASSERT': 1,
+ 'RPY_REVDB_PRINT_ALL': 1}
def generate_source(self, db=None, defines={}, exe_name=None):
assert self.c_source_filename is None
diff --git a/rpython/translator/revdb/gencsupp.py b/rpython/translator/revdb/gencsupp.py
--- a/rpython/translator/revdb/gencsupp.py
+++ b/rpython/translator/revdb/gencsupp.py
@@ -83,9 +83,18 @@
return call_code # a hack for ll_call_destructor() to mean
# that the calls should really be done
#
- # hack: we don't need the flag for at least these two common functions
- if call_code in ('RPyGilRelease();', 'RPyGilAcquire();'):
+ # hack: we don't need the flag for at least this common function
+ if call_code == 'RPyGilRelease();':
return 'RPY_REVDB_CALL_GILCTRL(%s);' % (call_code,)
+ if call_code == 'RPyGilAcquire();':
+ # Could also work with a regular RPY_REVDB_CALL_VOID, but we
+ # use a different byte (0xFD instead of 0xFC) to detect more
+ # sync misses. In a single-threaded environment this 0xFD
+ # byte is not needed at all, but in a multi-threaded
+ # environment it ensures that during replaying, we don't go
+ # past the RPyGilAcquire() in case a different thread must run
+ # next.
+ return 'RPY_REVDB_CALL_GIL(%s);' % (call_code,)
#
tp = funcgen.lltypename(v_result)
if tp == 'void @':
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -1526,6 +1526,13 @@
} while (e != 0xFC);
}
+RPY_EXTERN
+void rpy_reverse_db_bad_acquire_gil(void)
+{
+ fprintf(stderr, "out of sync: unexpected byte in log (at acquire_gil)\n");
+ exit(1);
+}
+
/* ------------------------------------------------------------ */
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -31,7 +31,8 @@
RPY_EXTERN void rpy_reverse_db_setup(int *argc_p, char **argv_p[]);
RPY_EXTERN void rpy_reverse_db_teardown(void);
-#if 0 /* enable to print locations to stderr of all the EMITs */
+/* enable to print locations to stderr of all the EMITs */
+#ifdef RPY_REVDB_PRINT_ALL
# define _RPY_REVDB_PRINT(mode, _e) \
if (rpy_rev_fileno >= 0) { \
fprintf(stderr, \
@@ -41,7 +42,8 @@
}
#endif
-#if 0 /* enable to print all mallocs to stderr */
+/* enable to print all mallocs to stderr */
+#ifdef RPY_REVDB_PRINT_ALL
RPY_EXTERN void seeing_uid(uint64_t uid);
# define _RPY_REVDB_PRUID() \
if (rpy_rev_fileno >= 0) { \
@@ -140,6 +142,20 @@
rpy_reverse_db_invoke_callback(_re); \
}
+#define RPY_REVDB_CALL_GIL(call_code) \
+ if (!RPY_RDB_REPLAY) { \
+ call_code \
+ _RPY_REVDB_LOCK(); \
+ _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, 0xFD) \
+ _RPY_REVDB_UNLOCK(); \
+ } \
+ else { \
+ unsigned char _re; \
+ _RPY_REVDB_EMIT_REPLAY(unsigned char _e, _re) \
+ if (_re != 0xFD) \
+ rpy_reverse_db_bad_acquire_gil(); \
+ }
+
#define RPY_REVDB_CALL_GILCTRL(call_code) \
if (!RPY_RDB_REPLAY) { \
call_code \
@@ -223,5 +239,6 @@
RPY_EXTERN void rpy_reverse_db_invoke_callback(unsigned char);
RPY_EXTERN void rpy_reverse_db_callback_loc(int);
RPY_EXTERN void rpy_reverse_db_lock_acquire(bool_t lock_contention);
+RPY_EXTERN void rpy_reverse_db_bad_acquire_gil(void);
/* ------------------------------------------------------------ */
diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py
--- a/rpython/translator/revdb/test/test_basic.py
+++ b/rpython/translator/revdb/test/test_basic.py
@@ -95,10 +95,14 @@
x = self.next(); assert x == len(expected_string)
self.same_stack() # errno
x = self.next('i'); assert x == 0 # errno
+ self.gil_acquire()
def same_stack(self):
x = self.next('c'); assert x == '\xFC'
+ def gil_acquire(self):
+ x = self.next('c'); assert x == '\xFD'
+
def switch_thread(self, expected=None):
th, = self.special_packet(ASYNC_THREAD_SWITCH, 'q')
if expected is not None:
diff --git a/rpython/translator/revdb/test/test_callback.py b/rpython/translator/revdb/test/test_callback.py
--- a/rpython/translator/revdb/test/test_callback.py
+++ b/rpython/translator/revdb/test/test_callback.py
@@ -65,13 +65,17 @@
rdb = self.fetch_rdb([self.exename, 'Xx'])
rdb.same_stack() # callmesimple()
x = rdb.next('i'); assert x == 55555
+ rdb.gil_acquire()
rdb.write_call('55555\n')
b = rdb.next('!h'); assert 300 <= b < 310 # -> callback
x = rdb.next('i'); assert x == 40 # arg n
+ rdb.gil_acquire()
x = rdb.next('!h'); assert x == b # -> callback
x = rdb.next('i'); assert x == 3 # arg n
+ rdb.gil_acquire()
rdb.same_stack() # <- return in main thread
x = rdb.next('i'); assert x == 4000 * 300 # return from callme()
+ rdb.gil_acquire()
rdb.write_call('%s\n' % (4000 * 300,))
x = rdb.next('q'); assert x == 0 # number of stop points
assert rdb.done()
@@ -83,12 +87,15 @@
rdb = self.fetch_rdb([self.exename, 'Xx'])
b = rdb.next('!h'); assert 300 <= b < 310 # -> callback
x = rdb.next('i'); assert x == 40 # arg n
+ rdb.gil_acquire()
rdb.write_call('40\n')
x = rdb.next('!h'); assert x == b # -> callback again
x = rdb.next('i'); assert x == 3 # arg n
+ rdb.gil_acquire()
rdb.write_call('3\n')
rdb.same_stack() # -> return in main thread
x = rdb.next('i'); assert x == 120 # <- return from callme()
+ rdb.gil_acquire()
rdb.write_call('120\n')
x = rdb.next('q'); assert x == 2 # number of stop points
assert rdb.done()
From pypy.commits at gmail.com Wed Aug 10 03:34:18 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Wed, 10 Aug 2016 00:34:18 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: pyconza 2016 abstract for general
pypy talk
Message-ID: <57aad8fa.81cb1c0a.d1959.5383@mx.google.com>
Author: Richard Plangger
Branch: extradoc
Changeset: r5665:66529e75e820
Date: 2016-08-10 09:34 +0200
http://bitbucket.org/pypy/extradoc/changeset/66529e75e820/
Log: pyconza 2016 abstract for general pypy talk
diff --git a/blog/draft/new-jit-log.rst b/blog/draft/new-jit-log.rst
--- a/blog/draft/new-jit-log.rst
+++ b/blog/draft/new-jit-log.rst
@@ -66,7 +66,7 @@
Speed issues
------------
-VMProf is a great tool to find out hot spots that consume a lot of time in your program. As soon as you have identified code that runs slowly, you can switch to jitlog and maybe pinpoint certain aspects that do not behave as expected. You will find an overview, and are able to browse the generated code. If you cannot make sense of all that, you can just share the link with us and we can have a look too.
+VMProf is a great tool to find hot spots that consume a lot of time in your program. As soon as you have identified code that runs slowly, you can switch to jitlog and maybe pinpoint certain aspects that do not behave as expected. You will find an overview, and are able to browse the generated code. If you cannot make sense of all that, you can just share the link with us and we can have a look too.
Future direction
----------------
diff --git a/talk/pyconza2016/pypy-abstract.txt b/talk/pyconza2016/pypy-abstract.txt
new file mode 100644
--- /dev/null
+++ b/talk/pyconza2016/pypy-abstract.txt
@@ -0,0 +1,24 @@
+Optimizing Python programs, PyPy to rescue
+======================================
+
+In this talk I want to show how you can use PyPy for your benefit.
+It will kick off with a short introduction covering PyPy and its just in time
+compiler. PyPy is the most advanced Python interpreter around (besides CPython)
+and while it should generally just speed up your programs there is a wide range
+of performance that you can get out of PyPy.
+
+The first part, will cover considerations why one should write Python programs,
+and only spend fractions of the development time to optimize your program.
+The second part of this session will show and give you the knowledge and
+tools to inspect and change your program to improve it. We will cover two tools in detail:
+CFFI & VMProf.
+
+Our advanced library CFFI (C Foreign Function Interface) can easily replace
+CPython extension code. VMProf is a platform to inspect your program while it is running,
+imposing very little overhead.
+
+Throughout the talk real world examples will motivate why PyPy is a viable option
+to optimize your Python programs and present the examples' value to their developers.
+
+As a result of this talk, an audience member should be equipped with
+tools that help them to understand and optimize programs.
From pypy.commits at gmail.com Wed Aug 10 05:39:20 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 10 Aug 2016 02:39:20 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: Unclear points
Message-ID: <57aaf648.56421c0a.6a424.86fb@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r5666:d93f9aca2b36
Date: 2016-08-10 11:39 +0200
http://bitbucket.org/pypy/extradoc/changeset/d93f9aca2b36/
Log: Unclear points
diff --git a/talk/pyconza2016/pypy-abstract.txt b/talk/pyconza2016/pypy-abstract.txt
--- a/talk/pyconza2016/pypy-abstract.txt
+++ b/talk/pyconza2016/pypy-abstract.txt
@@ -1,14 +1,31 @@
-Optimizing Python programs, PyPy to rescue
-======================================
+Optimizing Python programs, PyPy to the rescue
+==============================================
In this talk I want to show how you can use PyPy for your benefit.
It will kick off with a short introduction covering PyPy and its just in time
compiler. PyPy is the most advanced Python interpreter around (besides CPython)
+XXX
+XXX you seem to say "CPython is more advanced than PyPy" above,
+XXX which doesn't make sense in this sentence because you say
+XXX below that PyPy is much faster than CPython
+XXX
and while it should generally just speed up your programs there is a wide range
of performance that you can get out of PyPy.
The first part, will cover considerations why one should write Python programs,
and only spend fractions of the development time to optimize your program.
+XXX
+XXX you mean, you want to explain that developers should write in Python
+XXX and spend only a small part of their time optimizing the program?
+XXX or something else? if I'm right then you should add below something
+XXX like "The second part of this session will be about this small part
+XXX of time: in cases where you need it, then I'll show tools that..."
+XXX But I'm not sure that's what you mean because CFFI is not really
+XXX about that: I'm trying to push it as a general solution also for
+XXX CPython, without focusing too much on performance. Maybe we should
+XXX have this talk be really about PyPy, and then for the other talk
+XXX I should have both CFFI and RevDB?
+XXX
The second part of this session will show and give you the knowledge and
tools to inspect and change your program to improve it. We will cover two tools in detail:
CFFI & VMProf.
From pypy.commits at gmail.com Wed Aug 10 06:25:07 2016
From: pypy.commits at gmail.com (arigo)
Date: Wed, 10 Aug 2016 03:25:07 -0700 (PDT)
Subject: [pypy-commit] pypy default: Write a deprecation notice in this file
Message-ID: <57ab0103.82ddc20a.5e0f1.8b48@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86129:9efa812a5f35
Date: 2016-08-10 12:24 +0200
http://bitbucket.org/pypy/pypy/changeset/9efa812a5f35/
Log: Write a deprecation notice in this file
diff --git a/include/PyPy.h b/include/PyPy.h
--- a/include/PyPy.h
+++ b/include/PyPy.h
@@ -2,7 +2,11 @@
#define _PYPY_H_
/* This header is meant to be included in programs that use PyPy as an
- embedded library. */
+ embedded library.
+
+ NOTE: this is deprecated. Instead, use cffi's embedding support:
+ http://cffi.readthedocs.org/en/latest/embedding.html
+*/
#ifdef __cplusplus
extern "C" {
From pypy.commits at gmail.com Wed Aug 10 10:11:38 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Wed, 10 Aug 2016 07:11:38 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Fix test_ast to check for correct
'arguments'-fields in test_fields
Message-ID: <57ab361a.274fc20a.a9df1.ef38@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86130:fffb365644a3
Date: 2016-08-10 16:10 +0200
http://bitbucket.org/pypy/pypy/changeset/fffb365644a3/
Log: Fix test_ast to check for correct 'arguments'-fields in test_fields
diff --git a/pypy/interpreter/astcompiler/test/test_ast.py b/pypy/interpreter/astcompiler/test/test_ast.py
--- a/pypy/interpreter/astcompiler/test/test_ast.py
+++ b/pypy/interpreter/astcompiler/test/test_ast.py
@@ -45,8 +45,8 @@
w_fields = space.getattr(ast.get(space).w_arguments,
space.wrap("_fields"))
assert space.eq_w(w_fields, space.wrap(
- ('args', 'vararg', 'varargannotation', 'kwonlyargs', 'kwarg',
- 'kwargannotation', 'defaults', 'kw_defaults')))
+ ('args', 'vararg', 'kwonlyargs', 'kw_defaults',
+ 'kwarg', 'defaults')))
def test_attributes(self, space):
w_attrs = space.getattr(ast.get(space).w_FunctionDef,
From pypy.commits at gmail.com Wed Aug 10 12:29:10 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Wed, 10 Aug 2016 09:29:10 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: some more translation issues that
showed up
Message-ID: <57ab5656.45c8c20a.3d264.84b3@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async
Changeset: r86131:663016b57d78
Date: 2016-08-10 18:27 +0200
http://bitbucket.org/pypy/pypy/changeset/663016b57d78/
Log: some more translation issues that showed up
diff --git a/pypy/interpreter/astcompiler/assemble.py.orig b/pypy/interpreter/astcompiler/assemble.py.orig
deleted file mode 100644
--- a/pypy/interpreter/astcompiler/assemble.py.orig
+++ /dev/null
@@ -1,765 +0,0 @@
-"""Python control flow graph generation and bytecode assembly."""
-
-import os
-from rpython.rlib import rfloat
-from rpython.rlib.objectmodel import specialize, we_are_translated
-
-from pypy.interpreter.astcompiler import ast, consts, misc, symtable
-from pypy.interpreter.error import OperationError
-from pypy.interpreter.pycode import PyCode
-from pypy.tool import stdlib_opcode as ops
-
-
-class StackDepthComputationError(Exception):
- pass
-
-
-class Instruction(object):
- """Represents a single opcode."""
-
- def __init__(self, opcode, arg=0):
- self.opcode = opcode
- self.arg = arg
- self.lineno = 0
- self.has_jump = False
-
- def size(self):
- """Return the size of bytes of this instruction when it is
- encoded.
- """
- if self.opcode >= ops.HAVE_ARGUMENT:
- return (6 if self.arg > 0xFFFF else 3)
- return 1
-
- def jump_to(self, target, absolute=False):
- """Indicate the target this jump instruction.
-
- The opcode must be a JUMP opcode.
- """
- self.jump = (target, absolute)
- self.has_jump = True
-
- def __repr__(self):
- data = [ops.opname[self.opcode]]
- template = "<%s"
- if self.opcode >= ops.HAVE_ARGUMENT:
- data.append(self.arg)
- template += " %i"
- if self.has_jump:
- data.append(self.jump[0])
- template += " %s"
- template += ">"
- return template % tuple(data)
-
-
-class Block(object):
- """A basic control flow block.
-
- It has one entry point and several possible exit points. Its
- instructions may be jumps to other blocks, or if control flow
- reaches the end of the block, it continues to next_block.
- """
-
- marked = False
- have_return = False
- auto_inserted_return = False
-
- def __init__(self):
- self.instructions = []
- self.next_block = None
-
- def _post_order_see(self, stack, nextblock):
- if nextblock.marked == 0:
- nextblock.marked = 1
- stack.append(nextblock)
-
- def post_order(self):
- """Return this block and its children in post order. This means
- that the graph of blocks is first cleaned up to ignore
- back-edges, thus turning it into a DAG. Then the DAG is
- linearized. For example:
-
- A --> B -\ => [A, D, B, C]
- \-> D ---> C
- """
- resultblocks = []
- stack = [self]
- self.marked = 1
- while stack:
- current = stack[-1]
- if current.marked == 1:
- current.marked = 2
- if current.next_block is not None:
- self._post_order_see(stack, current.next_block)
- else:
- i = current.marked - 2
- assert i >= 0
- while i < len(current.instructions):
- instr = current.instructions[i]
- i += 1
- if instr.has_jump:
- current.marked = i + 2
- self._post_order_see(stack, instr.jump[0])
- break
- else:
- resultblocks.append(current)
- stack.pop()
- resultblocks.reverse()
- return resultblocks
-
- def code_size(self):
- """Return the encoded size of all the instructions in this
- block.
- """
- i = 0
- for instr in self.instructions:
- i += instr.size()
- return i
-
- def get_code(self):
- """Encode the instructions in this block into bytecode."""
- code = []
- for instr in self.instructions:
- opcode = instr.opcode
- if opcode >= ops.HAVE_ARGUMENT:
- arg = instr.arg
- if instr.arg > 0xFFFF:
- ext = arg >> 16
- code.append(chr(ops.EXTENDED_ARG))
- code.append(chr(ext & 0xFF))
- code.append(chr(ext >> 8))
- arg &= 0xFFFF
- code.append(chr(opcode))
- code.append(chr(arg & 0xFF))
- code.append(chr(arg >> 8))
- else:
- code.append(chr(opcode))
- return ''.join(code)
-
-
-def _make_index_dict_filter(syms, flag):
- i = 0
- result = {}
- for name, scope in syms.iteritems():
- if scope == flag:
- result[name] = i
- i += 1
- return result
-
-
-@specialize.argtype(0)
-def _iter_to_dict(iterable, offset=0):
- result = {}
- index = offset
- for item in iterable:
- result[item] = index
- index += 1
- return result
-
-
-class PythonCodeMaker(ast.ASTVisitor):
- """Knows how to assemble a PyCode object."""
-
- def __init__(self, space, name, first_lineno, scope, compile_info):
- self.space = space
- self.name = name
- self.first_lineno = first_lineno
- self.compile_info = compile_info
- self.first_block = self.new_block()
- self.use_block(self.first_block)
- self.names = {}
- self.var_names = _iter_to_dict(scope.varnames)
- self.cell_vars = _make_index_dict_filter(scope.symbols,
- symtable.SCOPE_CELL)
- self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars))
- self.w_consts = space.newdict()
- self.argcount = 0
- self.kwonlyargcount = 0
- self.lineno_set = False
- self.lineno = 0
- self.add_none_to_final_return = True
-
- def new_block(self):
- return Block()
-
- def use_block(self, block):
- """Start emitting bytecode into block."""
- self.current_block = block
- self.instrs = block.instructions
-
- def use_next_block(self, block=None):
- """Set this block as the next_block for the last and use it."""
- if block is None:
- block = self.new_block()
- self.current_block.next_block = block
- self.use_block(block)
- return block
-
- def is_dead_code(self):
- """Return False if any code can be meaningfully added to the
- current block, or True if it would be dead code."""
- # currently only True after a RETURN_VALUE.
- return self.current_block.have_return
-
- def emit_op(self, op):
- """Emit an opcode without an argument."""
- instr = Instruction(op)
- if not self.lineno_set:
- instr.lineno = self.lineno
- self.lineno_set = True
- if not self.is_dead_code():
- self.instrs.append(instr)
- if op == ops.RETURN_VALUE:
- self.current_block.have_return = True
- return instr
-
- def emit_op_arg(self, op, arg):
- """Emit an opcode with an integer argument."""
- instr = Instruction(op, arg)
- if not self.lineno_set:
- instr.lineno = self.lineno
- self.lineno_set = True
- if not self.is_dead_code():
- self.instrs.append(instr)
-
- def emit_op_name(self, op, container, name):
- """Emit an opcode referencing a name."""
- self.emit_op_arg(op, self.add_name(container, name))
-
- def emit_jump(self, op, block_to, absolute=False):
- """Emit a jump opcode to another block."""
- self.emit_op(op).jump_to(block_to, absolute)
-
- def add_name(self, container, name):
- """Get the index of a name in container."""
- name = self.scope.mangle(name)
- try:
- index = container[name]
- except KeyError:
- index = len(container)
- container[name] = index
- return index
-
- def add_const(self, obj):
- """Add a W_Root to the constant array and return its location."""
- space = self.space
- # To avoid confusing equal but separate types, we hash store the type
- # of the constant in the dictionary. Moreover, we have to keep the
- # difference between -0.0 and 0.0 floats, and this recursively in
- # tuples.
- w_key = self._make_key(obj)
-
- w_len = space.finditem(self.w_consts, w_key)
- if w_len is None:
- w_len = space.len(self.w_consts)
- space.setitem(self.w_consts, w_key, w_len)
- if space.int_w(w_len) == 0:
- self.scope.doc_removable = False
- return space.int_w(w_len)
-
- def _make_key(self, obj):
- # see the tests 'test_zeros_not_mixed*' in ../test/test_compiler.py
- space = self.space
- w_type = space.type(obj)
- if space.is_w(w_type, space.w_float):
- val = space.float_w(obj)
- if val == 0.0 and rfloat.copysign(1., val) < 0:
- w_key = space.newtuple([obj, space.w_float, space.w_None])
- else:
- w_key = space.newtuple([obj, space.w_float])
- elif space.is_w(w_type, space.w_complex):
- w_real = space.getattr(obj, space.wrap("real"))
- w_imag = space.getattr(obj, space.wrap("imag"))
- real = space.float_w(w_real)
- imag = space.float_w(w_imag)
- real_negzero = (real == 0.0 and
- rfloat.copysign(1., real) < 0)
- imag_negzero = (imag == 0.0 and
- rfloat.copysign(1., imag) < 0)
- if real_negzero and imag_negzero:
- tup = [obj, space.w_complex, space.w_None, space.w_None,
- space.w_None]
- elif imag_negzero:
- tup = [obj, space.w_complex, space.w_None, space.w_None]
- elif real_negzero:
- tup = [obj, space.w_complex, space.w_None]
- else:
- tup = [obj, space.w_complex]
- w_key = space.newtuple(tup)
- elif space.is_w(w_type, space.w_tuple):
- result_w = [obj, w_type]
- for w_item in space.fixedview(obj):
- result_w.append(self._make_key(w_item))
- w_key = space.newtuple(result_w[:])
- elif isinstance(obj, PyCode):
- w_key = space.newtuple([obj, w_type, space.id(obj)])
- else:
- w_key = space.newtuple([obj, w_type])
- return w_key
-
- def load_const(self, obj):
- index = self.add_const(obj)
- self.emit_op_arg(ops.LOAD_CONST, index)
-
- def update_position(self, lineno, force=False):
- """Possibly change the lineno for the next instructions."""
- if force or lineno > self.lineno:
- self.lineno = lineno
- self.lineno_set = False
-
- def _resolve_block_targets(self, blocks):
- """Compute the arguments of jump instructions."""
- last_extended_arg_count = 0
- # The reason for this loop is extended jumps. EXTENDED_ARG
- # extends the bytecode size, so it might invalidate the offsets
- # we've already given. Thus we have to loop until the number of
- # extended args is stable. Any extended jump at all is
- # extremely rare, so performance is not too concerning.
- while True:
- extended_arg_count = 0
- offset = 0
- force_redo = False
- # Calculate the code offset of each block.
- for block in blocks:
- block.offset = offset
- offset += block.code_size()
- for block in blocks:
- offset = block.offset
- for instr in block.instructions:
- offset += instr.size()
- if instr.has_jump:
- target, absolute = instr.jump
- op = instr.opcode
- # Optimize an unconditional jump going to another
- # unconditional jump.
- if op == ops.JUMP_ABSOLUTE or op == ops.JUMP_FORWARD:
- if target.instructions:
- target_op = target.instructions[0].opcode
- if target_op == ops.JUMP_ABSOLUTE:
- target = target.instructions[0].jump[0]
- instr.opcode = ops.JUMP_ABSOLUTE
- absolute = True
- elif target_op == ops.RETURN_VALUE:
- # Replace JUMP_* to a RETURN into
- # just a RETURN
- instr.opcode = ops.RETURN_VALUE
- instr.arg = 0
- instr.has_jump = False
- # The size of the code changed,
- # we have to trigger another pass
- force_redo = True
- continue
- if absolute:
- jump_arg = target.offset
- else:
- jump_arg = target.offset - offset
- instr.arg = jump_arg
- if jump_arg > 0xFFFF:
- extended_arg_count += 1
- if (extended_arg_count == last_extended_arg_count and
- not force_redo):
- break
- else:
- last_extended_arg_count = extended_arg_count
-
- def _build_consts_array(self):
- """Turn the applevel constants dictionary into a list."""
- w_consts = self.w_consts
- space = self.space
- consts_w = [space.w_None] * space.len_w(w_consts)
- w_iter = space.iter(w_consts)
- first = space.wrap(0)
- while True:
- try:
- w_key = space.next(w_iter)
- except OperationError as e:
- if not e.match(space, space.w_StopIteration):
- raise
- break
- w_index = space.getitem(w_consts, w_key)
- w_constant = space.getitem(w_key, first)
- w_constant = misc.intern_if_common_string(space, w_constant)
- consts_w[space.int_w(w_index)] = w_constant
- return consts_w
-
- def _get_code_flags(self):
- """Get an extra flags that should be attached to the code object."""
- raise NotImplementedError
-
- def _stacksize(self, blocks):
- """Compute co_stacksize."""
- for block in blocks:
- block.initial_depth = 0
- # Assumes that it is sufficient to walk the blocks in 'post-order'.
- # This means we ignore all back-edges, but apart from that, we only
- # look into a block when all the previous blocks have been done.
- self._max_depth = 0
- for block in blocks:
- depth = self._do_stack_depth_walk(block)
- if block.auto_inserted_return and depth != 0:
- os.write(2, "StackDepthComputationError in %s at %s:%s\n" % (
- self.compile_info.filename, self.name, self.first_lineno))
- raise StackDepthComputationError # fatal error
- return self._max_depth
-
- def _next_stack_depth_walk(self, nextblock, depth):
- if depth > nextblock.initial_depth:
- nextblock.initial_depth = depth
-
- def _do_stack_depth_walk(self, block):
- depth = block.initial_depth
- for instr in block.instructions:
- depth += _opcode_stack_effect(instr.opcode, instr.arg)
- if depth >= self._max_depth:
- self._max_depth = depth
- jump_op = instr.opcode
- if instr.has_jump:
- target_depth = depth
- if jump_op == ops.FOR_ITER:
- target_depth -= 2
- elif (jump_op == ops.SETUP_FINALLY or
- jump_op == ops.SETUP_EXCEPT or
- jump_op == ops.SETUP_WITH):
- if jump_op == ops.SETUP_FINALLY:
- target_depth += 4
- elif jump_op == ops.SETUP_EXCEPT:
- target_depth += 4
- elif jump_op == ops.SETUP_WITH:
- target_depth += 3
- if target_depth > self._max_depth:
- self._max_depth = target_depth
- elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or
- jump_op == ops.JUMP_IF_FALSE_OR_POP):
- depth -= 1
- self._next_stack_depth_walk(instr.jump[0], target_depth)
- if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
- # Nothing more can occur.
- break
- elif jump_op == ops.RETURN_VALUE or jump_op == ops.RAISE_VARARGS:
- # Nothing more can occur.
- break
- else:
- if block.next_block:
- self._next_stack_depth_walk(block.next_block, depth)
- return depth
-
- def _build_lnotab(self, blocks):
- """Build the line number table for tracebacks and tracing."""
- current_line = self.first_lineno
- current_off = 0
- table = []
- push = table.append
- for block in blocks:
- offset = block.offset
- for instr in block.instructions:
- if instr.lineno:
- # compute deltas
- line = instr.lineno - current_line
- if line < 0:
- continue
- addr = offset - current_off
- # Python assumes that lineno always increases with
- # increasing bytecode address (lnotab is unsigned
- # char). Depending on when SET_LINENO instructions
- # are emitted this is not always true. Consider the
- # code:
- # a = (1,
- # b)
- # In the bytecode stream, the assignment to "a"
- # occurs after the loading of "b". This works with
- # the C Python compiler because it only generates a
- # SET_LINENO instruction for the assignment.
- if line or addr:
- while addr > 255:
- push(chr(255))
- push(chr(0))
- addr -= 255
- while line > 255:
- push(chr(addr))
- push(chr(255))
- line -= 255
- addr = 0
- push(chr(addr))
- push(chr(line))
- current_line = instr.lineno
- current_off = offset
- offset += instr.size()
- return ''.join(table)
-
- def assemble(self):
- """Build a PyCode object."""
- # Unless it's interactive, every code object must end in a return.
- if not self.current_block.have_return:
- self.use_next_block()
- if self.add_none_to_final_return:
- self.load_const(self.space.w_None)
- self.emit_op(ops.RETURN_VALUE)
- self.current_block.auto_inserted_return = True
- # Set the first lineno if it is not already explicitly set.
- if self.first_lineno == -1:
- if self.first_block.instructions:
- self.first_lineno = self.first_block.instructions[0].lineno
- else:
- self.first_lineno = 1
- blocks = self.first_block.post_order()
- self._resolve_block_targets(blocks)
- lnotab = self._build_lnotab(blocks)
- stack_depth = self._stacksize(blocks)
- consts_w = self._build_consts_array()
- names = _list_from_dict(self.names)
- var_names = _list_from_dict(self.var_names)
- cell_names = _list_from_dict(self.cell_vars)
- free_names = _list_from_dict(self.free_vars, len(cell_names))
- flags = self._get_code_flags()
- # (Only) inherit compilerflags in PyCF_MASK
- flags |= (self.compile_info.flags & consts.PyCF_MASK)
- bytecode = ''.join([block.get_code() for block in blocks])
- return PyCode(self.space,
- self.argcount,
- self.kwonlyargcount,
- len(self.var_names),
- stack_depth,
- flags,
- bytecode,
- list(consts_w),
- names,
- var_names,
- self.compile_info.filename,
- self.name,
- self.first_lineno,
- lnotab,
- free_names,
- cell_names,
- self.compile_info.hidden_applevel)
-
-
-def _list_from_dict(d, offset=0):
- result = [None] * len(d)
- for obj, index in d.iteritems():
- result[index - offset] = obj
- return result
-
-
-_static_opcode_stack_effects = {
- ops.NOP: 0,
-
- ops.POP_TOP: -1,
- ops.ROT_TWO: 0,
- ops.ROT_THREE: 0,
- ops.DUP_TOP: 1,
- ops.DUP_TOP_TWO: 2,
-
- ops.UNARY_POSITIVE: 0,
- ops.UNARY_NEGATIVE: 0,
- ops.UNARY_NOT: 0,
- ops.UNARY_INVERT: 0,
-
- ops.LIST_APPEND: -1,
- ops.SET_ADD: -1,
- ops.MAP_ADD: -2,
-<<<<<<< local
-=======
- # XXX
- ops.STORE_MAP: -2,
->>>>>>> other
-
- ops.BINARY_POWER: -1,
- ops.BINARY_MULTIPLY: -1,
- ops.BINARY_MODULO: -1,
- ops.BINARY_ADD: -1,
- ops.BINARY_SUBTRACT: -1,
- ops.BINARY_SUBSCR: -1,
- ops.BINARY_FLOOR_DIVIDE: -1,
- ops.BINARY_TRUE_DIVIDE: -1,
- ops.BINARY_MATRIX_MULTIPLY: -1,
- ops.BINARY_LSHIFT: -1,
- ops.BINARY_RSHIFT: -1,
- ops.BINARY_AND: -1,
- ops.BINARY_OR: -1,
- ops.BINARY_XOR: -1,
-
- ops.INPLACE_FLOOR_DIVIDE: -1,
- ops.INPLACE_TRUE_DIVIDE: -1,
- ops.INPLACE_ADD: -1,
- ops.INPLACE_SUBTRACT: -1,
- ops.INPLACE_MULTIPLY: -1,
- ops.INPLACE_MODULO: -1,
- ops.INPLACE_POWER: -1,
- ops.INPLACE_MATRIX_MULTIPLY: -1,
- ops.INPLACE_LSHIFT: -1,
- ops.INPLACE_RSHIFT: -1,
- ops.INPLACE_AND: -1,
- ops.INPLACE_OR: -1,
- ops.INPLACE_XOR: -1,
-
- ops.STORE_SUBSCR: -3,
- ops.DELETE_SUBSCR: -2,
-
- ops.GET_ITER: 0,
- ops.FOR_ITER: 1,
- ops.BREAK_LOOP: 0,
- ops.CONTINUE_LOOP: 0,
- ops.SETUP_LOOP: 0,
-
- ops.PRINT_EXPR: -1,
-
-<<<<<<< local
- ops.WITH_CLEANUP_START: -1,
- ops.WITH_CLEANUP_FINISH: -1, # XXX Sometimes more
-=======
- # TODO
- ops.WITH_CLEANUP: -1,
->>>>>>> other
- ops.LOAD_BUILD_CLASS: 1,
-<<<<<<< local
-=======
- # TODO
- ops.STORE_LOCALS: -1,
->>>>>>> other
- ops.POP_BLOCK: 0,
- ops.POP_EXCEPT: -1,
- ops.END_FINALLY: -4, # assume always 4: we pretend that SETUP_FINALLY
- # pushes 4. In truth, it would only push 1 and
- # the corresponding END_FINALLY only pops 1.
- ops.SETUP_WITH: 1,
- ops.SETUP_FINALLY: 0,
- ops.SETUP_EXCEPT: 0,
-
- ops.RETURN_VALUE: -1,
- ops.YIELD_VALUE: 0,
- ops.YIELD_FROM: -1,
- ops.COMPARE_OP: -1,
-
- # TODO
- ops.LOOKUP_METHOD: 1,
-
- ops.LOAD_NAME: 1,
- ops.STORE_NAME: -1,
- ops.DELETE_NAME: 0,
-
- ops.LOAD_FAST: 1,
- ops.STORE_FAST: -1,
- ops.DELETE_FAST: 0,
-
- ops.LOAD_ATTR: 0,
- ops.STORE_ATTR: -2,
- ops.DELETE_ATTR: -1,
-
- ops.LOAD_GLOBAL: 1,
- ops.STORE_GLOBAL: -1,
- ops.DELETE_GLOBAL: 0,
- ops.DELETE_DEREF: 0,
-
- ops.LOAD_CLOSURE: 1,
- ops.LOAD_DEREF: 1,
- ops.STORE_DEREF: -1,
- ops.DELETE_DEREF: 0,
-
- ops.LOAD_CONST: 1,
-
- ops.IMPORT_STAR: -1,
- ops.IMPORT_NAME: -1,
- ops.IMPORT_FROM: 1,
-
- ops.JUMP_FORWARD: 0,
- ops.JUMP_ABSOLUTE: 0,
- ops.JUMP_IF_TRUE_OR_POP: 0,
- ops.JUMP_IF_FALSE_OR_POP: 0,
- ops.POP_JUMP_IF_TRUE: -1,
- ops.POP_JUMP_IF_FALSE: -1,
- # TODO
- ops.JUMP_IF_NOT_DEBUG: 0,
-
- # TODO
- ops.BUILD_LIST_FROM_ARG: 1,
-}
-
-
-def _compute_UNPACK_SEQUENCE(arg):
- return arg - 1
-
-def _compute_UNPACK_EX(arg):
- return (arg & 0xFF) + (arg >> 8)
-
-def _compute_BUILD_TUPLE(arg):
- return 1 - arg
-
-def _compute_BUILD_LIST(arg):
- return 1 - arg
-
-def _compute_BUILD_SET(arg):
- return 1 - arg
-
-def _compute_BUILD_MAP(arg):
- return 1 - 2 * arg
-
-def _compute_BUILD_MAP_UNPACK(arg):
- return 1 - arg
-
-def _compute_MAKE_CLOSURE(arg):
- return -2 - _num_args(arg) - ((arg >> 16) & 0xFFFF)
-
-def _compute_MAKE_FUNCTION(arg):
- return -1 - _num_args(arg) - ((arg >> 16) & 0xFFFF)
-
-def _compute_BUILD_SLICE(arg):
- if arg == 3:
- return -2
- else:
- return -1
-
-def _compute_RAISE_VARARGS(arg):
- return -arg
-
-def _num_args(oparg):
- return (oparg % 256) + 2 * ((oparg // 256) % 256)
-
-def _compute_CALL_FUNCTION(arg):
- return -_num_args(arg)
-
-def _compute_CALL_FUNCTION_VAR(arg):
- return -_num_args(arg) - 1
-
-def _compute_CALL_FUNCTION_KW(arg):
- return -_num_args(arg) - 1
-
-def _compute_CALL_FUNCTION_VAR_KW(arg):
- return -_num_args(arg) - 2
-
-def _compute_CALL_METHOD(arg):
- return -_num_args(arg) - 1
-
-
-_stack_effect_computers = {}
-for name, func in globals().items():
- if name.startswith("_compute_"):
- func._always_inline_ = True
- _stack_effect_computers[getattr(ops, name[9:])] = func
-for op, value in _static_opcode_stack_effects.iteritems():
- def func(arg, _value=value):
- return _value
- func._always_inline_ = True
- _stack_effect_computers[op] = func
-del name, func, op, value
-
-
-def _opcode_stack_effect(op, arg):
- """Return the stack effect of a opcode an its argument."""
- if we_are_translated():
- for possible_op in ops.unrolling_opcode_descs:
- # EXTENDED_ARG should never get in here.
- if possible_op.index == ops.EXTENDED_ARG:
- continue
- if op == possible_op.index:
- return _stack_effect_computers[possible_op.index](arg)
- else:
- raise AssertionError("unknown opcode: %s" % (op,))
- else:
- try:
- return _static_opcode_stack_effects[op]
- except KeyError:
- try:
- return _stack_effect_computers[op](arg)
- except KeyError:
- raise KeyError("Unknown stack effect for %s (%s)" %
- (ops.opname[op], op))
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -4,6 +4,7 @@
from pypy.interpreter.pyparser.pygram import syms, tokens
from pypy.interpreter.pyparser.error import SyntaxError
from pypy.interpreter.pyparser import parsestring
+from rpython.rlib.objectmodel import always_inline
def ast_from_node(space, node, compile_info):
@@ -1169,6 +1170,7 @@
raise
return self.space.call_function(self.space.w_float, w_num_str)
+ @always_inline
def handle_dictelement(self, node, i):
if node.get_child(i).type == tokens.DOUBLESTAR:
key = None
@@ -1178,7 +1180,7 @@
key = self.handle_expr(node.get_child(i))
value = self.handle_expr(node.get_child(i+2))
i += 3
- return [i,key,value]
+ return (i,key,value)
def handle_atom(self, atom_node):
first_child = atom_node.get_child(0)
@@ -1374,10 +1376,7 @@
set_maker.get_column())
def handle_dictcomp(self, dict_maker):
- dictelement = self.handle_dictelement(dict_maker, 0)
- i = dictelement[0]
- key = dictelement[1]
- value = dictelement[2]
+ i, key, value = self.handle_dictelement(dict_maker, 0)
comps = self.comprehension_helper(dict_maker.get_child(i))
return ast.DictComp(key, value, comps, dict_maker.get_lineno(),
dict_maker.get_column())
@@ -1387,10 +1386,9 @@
values = []
i = 0
while i < node.num_children():
- dictelement = self.handle_dictelement(node, i)
- i = dictelement[0]
- keys.append(dictelement[1])
- values.append(dictelement[2])
+ i, key, value = self.handle_dictelement(node, i)
+ keys.append(key)
+ values.append(value)
i += 1
return ast.Dict(keys, values, node.get_lineno(), node.get_column())
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -441,8 +441,7 @@
# 4. load class name
self.load_const(self.space.wrap(cls.name.decode('utf-8')))
# 5. generate the rest of the code for the call
- self._make_call(2,
- cls.bases, cls.keywords)
+ self._make_call(2, cls.bases, cls.keywords)
# 6. apply decorators
if cls.decorator_list:
for i in range(len(cls.decorator_list)):
@@ -1348,8 +1347,7 @@
if self._optimize_method_call(call):
return
call.func.walkabout(self)
- self._make_call(0,
- call.args, call.keywords)
+ self._make_call(0, call.args, call.keywords)
def _call_has_no_star_args(self, call):
if call.args is not None:
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1056,7 +1056,7 @@
w_value = self.popvalue()
w_gen = self.peekvalue()
if isinstance(w_gen, Coroutine):
- if (w_gen.descr_gi_code(w_gen).co_flags & consts.CO_COROUTINE and
+ if (w_gen.descr_gi_code(space).co_flags & consts.CO_COROUTINE and
not self.pycode.co_flags & (consts.CO_COROUTINE |
consts.CO_ITERABLE_COROUTINE)):
raise oefmt(self.space.w_TypeError,
@@ -1479,27 +1479,29 @@
self.pushvalue(res)
def BEFORE_ASYNC_WITH(self, oparg, next_instr):
+ space = self.space
w_manager = self.peekvalue()
- w_enter = self.space.lookup(w_manager, "__aenter__")
- w_descr = self.space.lookup(w_manager, "__aexit__")
+ w_enter = space.lookup(w_manager, "__aenter__")
+ w_descr = space.lookup(w_manager, "__aexit__")
if w_enter is None or w_descr is None:
- raise oefmt(self.space.w_AttributeError,
+ raise oefmt(space.w_AttributeError,
"'%T' object is not a context manager (no __aenter__/"
"__aexit__ method)", w_manager)
- w_exit = self.space.get(w_descr, w_manager)
+ w_exit = space.get(w_descr, w_manager)
self.settopvalue(w_exit)
- w_result = self.space.get_and_call_function(w_enter, w_manager)
+ w_result = space.get_and_call_function(w_enter, w_manager)
self.pushvalue(w_result)
def GET_AITER(self, oparg, next_instr):
+ space = self.space
w_obj = self.peekvalue()
- w_func = self.space.lookup(w_obj, "__aiter__")
+ w_func = space.lookup(w_obj, "__aiter__")
if w_func is None:
raise oefmt(space.w_AttributeError,
"object %T does not have __aiter__ method",
w_obj)
w_iter = space.get_and_call_function(w_func, w_obj)
- w_awaitable = w_iter._GetAwaitableIter(self.space)
+ w_awaitable = w_iter._GetAwaitableIter(space)
if w_awaitable is None:
raise oefmt(space.w_TypeError,
"'async for' received an invalid object "
@@ -1507,14 +1509,15 @@
self.settopvalue(w_awaitable)
def GET_ANEXT(self, oparg, next_instr):
+ space = self.space
w_aiter = self.peekvalue()
- w_func = self.space.lookup(w_aiter, "__anext__")
+ w_func = space.lookup(w_aiter, "__anext__")
if w_func is None:
raise oefmt(space.w_AttributeError,
"object %T does not have __anext__ method",
w_aiter)
w_next_iter = space.get_and_call_function(w_func, w_aiter)
- w_awaitable = w_next_iter._GetAwaitableIter(self.space)
+ w_awaitable = w_next_iter._GetAwaitableIter(space)
if w_awaitable is None:
raise oefmt(space.w_TypeError,
"'async for' received an invalid object "
diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py
--- a/pypy/module/zipimport/interp_zipimport.py
+++ b/pypy/module/zipimport/interp_zipimport.py
@@ -85,7 +85,7 @@
def iteratekeys(self, space):
return space.iter(self.keys(space))
- def itervalues(self, space):
+ def iteratevalues(self, space):
return space.iter(self.values(space))
def iteritems(self, space):
@@ -112,7 +112,7 @@
keys = interp2app(W_ZipCache.keys),
iterkeys = interp2app(W_ZipCache.iteratekeys),
values = interp2app(W_ZipCache.values),
- itervalues = interp2app(W_ZipCache.itervalues),
+ itervalues = interp2app(W_ZipCache.iteratevalues),
clear = interp2app(W_ZipCache.clear),
__delitem__ = interp2app(W_ZipCache.delitem),
)
From pypy.commits at gmail.com Thu Aug 11 03:54:48 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 00:54:48 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Correctly close the remote end
of the socketpair. Otherwise, the
Message-ID: <57ac2f48.45c8c20a.dc13a.67fa@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86132:5797950d42c5
Date: 2016-08-11 09:53 +0200
http://bitbucket.org/pypy/pypy/changeset/5797950d42c5/
Log: Correctly close the remote end of the socketpair. Otherwise, the
subprocess may remain alive forever, trying to read from its own end
of the socketpair, because it also happens to keep the remote end
open.
diff --git a/rpython/translator/revdb/process.py b/rpython/translator/revdb/process.py
--- a/rpython/translator/revdb/process.py
+++ b/rpython/translator/revdb/process.py
@@ -228,7 +228,7 @@
s1, s2 = socket.socketpair()
initial_subproc = subprocess.Popen(
[executable, '--revdb-replay', revdb_log_filename,
- str(s2.fileno())])
+ str(s2.fileno())], preexec_fn=s1.close)
s2.close()
child = ReplayProcess(initial_subproc.pid, s1,
linecacheoutput=linecacheoutput)
diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py
--- a/rpython/translator/revdb/test/test_basic.py
+++ b/rpython/translator/revdb/test/test_basic.py
@@ -263,7 +263,7 @@
s1, s2 = socket.socketpair()
subproc = subprocess.Popen(
[str(self.exename), '--revdb-replay', str(self.rdbname),
- str(s2.fileno())], **kwds)
+ str(s2.fileno())], preexec_fn=s1.close, **kwds)
s2.close()
self.subproc = subproc
child = ReplayProcess(subproc.pid, s1)
From pypy.commits at gmail.com Thu Aug 11 03:54:50 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 00:54:50 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: in-progress
Message-ID: <57ac2f4a.c75dc20a.853ec.6920@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86133:710590b97b42
Date: 2016-08-11 09:54 +0200
http://bitbucket.org/pypy/pypy/changeset/710590b97b42/
Log: in-progress
diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c
--- a/rpython/translator/c/src/entrypoint.c
+++ b/rpython/translator/c/src/entrypoint.c
@@ -101,10 +101,10 @@
RPython_StartupCode();
+#ifndef RPY_REVERSE_DEBUGGER
exitcode = STANDALONE_ENTRY_POINT(argc, argv);
-
-#ifdef RPY_REVERSE_DEBUGGER
- rpy_reverse_db_teardown();
+#else
+ exitcode = rpy_reverse_db_main(STANDALONE_ENTRY_POINT, argc, argv);
#endif
pypy_debug_alloc_results();
diff --git a/rpython/translator/revdb/gencsupp.py b/rpython/translator/revdb/gencsupp.py
--- a/rpython/translator/revdb/gencsupp.py
+++ b/rpython/translator/revdb/gencsupp.py
@@ -84,16 +84,16 @@
# that the calls should really be done
#
# hack: we don't need the flag for at least this common function
+ if call_code == 'RPyGilAcquire();':
+ return 'RPY_REVDB_CALL_GILCTRL(%s);' % (call_code,)
if call_code == 'RPyGilRelease();':
- return 'RPY_REVDB_CALL_GILCTRL(%s);' % (call_code,)
- if call_code == 'RPyGilAcquire();':
# Could also work with a regular RPY_REVDB_CALL_VOID, but we
# use a different byte (0xFD instead of 0xFC) to detect more
# sync misses. In a single-threaded environment this 0xFD
# byte is not needed at all, but in a multi-threaded
- # environment it ensures that during replaying, we don't go
- # past the RPyGilAcquire() in case a different thread must run
- # next.
+ # environment it ensures that during replaying, just after
+ # reading the 0xFD, we switch to a different thread if needed
+ # (actually implemented with stacklets).
return 'RPY_REVDB_CALL_GIL(%s);' % (call_code,)
#
tp = funcgen.lltypename(v_result)
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -44,9 +44,10 @@
typedef struct {
Signed version;
- uint64_t reserved1, reserved2;
+ uint64_t main_thread_id;
+ uint64_t reserved2;
void *ptr1, *ptr2;
- int reversed3;
+ int reserved3;
int argc;
char **argv;
} rdb_header_t;
@@ -130,8 +131,7 @@
_RPY_REVDB_UNLOCK();
}
-RPY_EXTERN
-void rpy_reverse_db_teardown(void)
+static void reverse_db_teardown(void)
{
uint64_t stop_points;
if (!RPY_RDB_REPLAY) {
@@ -237,6 +237,7 @@
h.ptr2 = &rpy_revdb;
h.argc = argc;
h.argv = argv;
+ h.main_thread_id = (uint64_t)pthread_self();
write_all((const char *)&h, sizeof(h));
/* write the whole content of rpy_rdb_struct */
@@ -254,7 +255,7 @@
rpy_revdb.buf_limit = rpy_rev_buffer + sizeof(rpy_rev_buffer) - 32;
rpy_revdb.unique_id_seen = 1;
- rpy_active_thread = 0; /* write an ASYNC_THREAD_SWITCH first in the log */
+ rpy_active_thread = 1;
rpy_active_thread_ptr = &rpy_active_thread;
pthread_atfork(NULL, NULL, close_revdb_fileno_in_fork_child);
@@ -625,6 +626,7 @@
*/
#include "src-revdb/fd_recv.c"
+#include "src/stacklet/stacklet.c" /* for replaying threads */
#define INIT_VERSION_NUMBER 0xd80100
@@ -658,6 +660,148 @@
static uint64_t *future_ids, *future_next_id;
static void *finalizer_tree, *destructor_tree;
+static stacklet_thread_handle st_thread;
+static stacklet_handle st_outer_controller_h;
+static uint64_t current_thread_id, target_thread_id;
+static void *thread_tree_root;
+
+
+struct replay_thread_main_s {
+ Signed (*entry_point)(Signed, char **);
+ int argc;
+ char **argv;
+};
+struct replay_thread_s {
+ uint64_t tid;
+ stacklet_handle h;
+};
+
+static stacklet_handle replay_thread_main(stacklet_handle h, void *arg)
+{
+ /* main thread starts */
+ struct replay_thread_main_s *m = arg;
+ st_outer_controller_h = h;
+ m->entry_point(m->argc, m->argv);
+
+ /* main thread finished, program stops */
+ reverse_db_teardown();
+
+ /* unreachable */
+ abort();
+}
+
+static void replay_invoke_callback(unsigned char e);
+
+static stacklet_handle replay_thread_sub(stacklet_handle h, void *ignored)
+{
+ /* A non-main thread starts. What it does is invoke a "callback",
+ which is the argument passed to rthread.ll_start_new_thread().
+ We get it here because the first thing stored in the log about
+ this thread should be a callback identifier.
+ */
+ unsigned char e1;
+ st_outer_controller_h = h;
+
+ if (rpy_revdb.buf_limit >= rpy_revdb.buf_p)
+ rpy_reverse_db_fetch(__FILE__, __LINE__);
+
+ _RPY_REVDB_EMIT_REPLAY(unsigned char _e, e1)
+ replay_invoke_callback(e1);
+
+ /* the thread finishes here. Return to the outer controller. */
+ return st_outer_controller_h;
+}
+
+static int compare_replay_thread(const void *a, const void *b)
+{
+ uint64_t ta = ((const struct replay_thread_s *)a)->tid;
+ uint64_t tb = ((const struct replay_thread_s *)b)->tid;
+ if (ta < tb)
+ return -1;
+ if (ta == tb)
+ return 0;
+ else
+ return 1;
+}
+
+RPY_EXTERN
+int rpy_reverse_db_main(Signed entry_point(Signed, char**),
+ int argc, char **argv)
+{
+ if (!RPY_RDB_REPLAY) {
+ int exitcode = (int)entry_point(argc, argv);
+ reverse_db_teardown();
+ return exitcode;
+ }
+ else {
+ /* start the entry point inside a new stacklet, so that we
+ can switch it away at any point later */
+ struct replay_thread_main_s m;
+ stacklet_handle h;
+ m.entry_point = entry_point;
+ m.argc = argc;
+ m.argv = argv;
+ h = stacklet_new(st_thread, replay_thread_main, &m);
+
+ /* We reach this point only if we start a second thread. This
+ is done by revdb_switch_thread(), which switches back to
+ 'st_outer_controller_h'. This is the outer controller
+ loop.
+ */
+ attach_gdb();
+ while (1) {
+ struct replay_thread_s *node, **item, dummy;
+
+ if (h == NULL)
+ goto out_of_memory;
+
+ if (h != EMPTY_STACKLET_HANDLE) {
+ /* save 'h' as the stacklet handle for the thread
+ 'current_thread_id' */
+ node = malloc(sizeof(struct replay_thread_s));
+ if (!node)
+ goto out_of_memory;
+ node->tid = current_thread_id;
+ node->h = h;
+ item = tsearch(node, &thread_tree_root, compare_replay_thread);
+ if (item == NULL)
+ goto out_of_memory;
+
+ if (*item != node) {
+ fprintf(stderr, "thread switch: duplicate thread\n");
+ exit(1);
+ }
+ }
+ else {
+ /* current_thread_id terminated */
+ }
+
+ /* fetch out (and delete) the handle for the target thread */
+ current_thread_id = target_thread_id;
+ dummy.tid = target_thread_id;
+ item = tfind(&dummy, &thread_tree_root, compare_replay_thread);
+ if (item == NULL) {
+ /* it's a new thread, start it now */
+ h = stacklet_new(st_thread, replay_thread_sub, NULL);
+ }
+ else {
+ node = *item;
+ assert(node->tid == target_thread_id);
+ h = node->h;
+ tdelete(node, &thread_tree_root, compare_replay_thread);
+ free(node);
+
+ h = stacklet_switch(h);
+ }
+ }
+ abort(); /* unreachable */
+
+ out_of_memory:
+ fprintf(stderr, "thread switch: out of memory\n");
+ exit(1);
+ }
+}
+
RPY_EXTERN
void attach_gdb(void)
{
@@ -796,6 +940,7 @@
(long)h.version, (long)RDB_VERSION);
exit(1);
}
+ current_thread_id = h.main_thread_id;
if (h.ptr1 != &rpy_reverse_db_stop_point ||
h.ptr2 != &rpy_revdb) {
fprintf(stderr,
@@ -833,6 +978,7 @@
set_revdb_breakpoints();
empty_string = make_rpy_string(0);
+ st_thread = stacklet_newthread(); /* replaying doesn't use real threads */
write_answer(ANSWER_INIT, INIT_VERSION_NUMBER, total_stop_points, 0);
@@ -887,8 +1033,6 @@
fprintf(stderr, "bad log format: incomplete packet\n");
exit(1);
}
-
- read_next_packet:
keep = rpy_revdb.buf_readend - rpy_revdb.buf_p;
assert(keep >= 0);
@@ -923,8 +1067,13 @@
return;
case ASYNC_THREAD_SWITCH:
- fetch_async_block();
- goto read_next_packet;
+ target_thread_id = fetch_async_block();
+ _RPY_REVDB_PRINT("[THRD]", target_thread_id);
+ rpy_revdb.buf_limit = rpy_revdb.buf_p;
+ st_outer_controller_h = stacklet_switch(st_outer_controller_h);
+ if (rpy_revdb.buf_limit == rpy_revdb.buf_p)
+ rpy_reverse_db_fetch(__FILE__, __LINE__);
+ return;
default:
fprintf(stderr, "bad packet header %d\n", (int)header);
@@ -1157,7 +1306,6 @@
memcpy(future_ids, extra, cmd->extra_size);
future_ids[cmd->extra_size / sizeof(uint64_t)] = 0;
uid_break = *future_ids;
- //attach_gdb();
}
future_next_id = future_ids;
}
@@ -1501,6 +1649,22 @@
RPY_CALLBACKLOCS /* macro from revdb_def.h */
};
+static void replay_invoke_callback(unsigned char e)
+{
+ unsigned long index;
+ unsigned char e2;
+ void (*pfn)(void);
+ _RPY_REVDB_EMIT_REPLAY(unsigned char _e, e2)
+ index = (e << 8) | e2;
+ index -= 300;
+ if (index >= (sizeof(callbacklocs) / sizeof(callbacklocs[0]))) {
+ fprintf(stderr, "bad callback index %lx\n", index);
+ exit(1);
+ }
+ pfn = callbacklocs[index];
+ pfn();
+}
+
RPY_EXTERN
void rpy_reverse_db_invoke_callback(unsigned char e)
{
@@ -1509,19 +1673,7 @@
callback identifier. */
do {
- unsigned long index;
- unsigned char e2;
- void (*pfn)(void);
- _RPY_REVDB_EMIT_REPLAY(unsigned char _e, e2)
- index = (e << 8) | e2;
- index -= 300;
- if (index >= (sizeof(callbacklocs) / sizeof(callbacklocs[0]))) {
- fprintf(stderr, "bad callback index\n");
- exit(1);
- }
- pfn = callbacklocs[index];
- pfn();
-
+ replay_invoke_callback(e);
_RPY_REVDB_EMIT_REPLAY(unsigned char _e, e)
} while (e != 0xFC);
}
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -29,7 +29,8 @@
/* ------------------------------------------------------------ */
RPY_EXTERN void rpy_reverse_db_setup(int *argc_p, char **argv_p[]);
-RPY_EXTERN void rpy_reverse_db_teardown(void);
+RPY_EXTERN int rpy_reverse_db_main(Signed entry_point(Signed, char**),
+ int argc, char **argv);
/* enable to print locations to stderr of all the EMITs */
#ifdef RPY_REVDB_PRINT_ALL
@@ -92,7 +93,7 @@
char *_end1 = _src + sizeof(_e); \
memcpy(&_e, _src, sizeof(_e)); \
rpy_revdb.buf_p = _end1; \
- _RPY_REVDB_PRINT("[read]", _e); \
+ _RPY_REVDB_PRINT("[ rd ]", _e); \
if (_end1 >= rpy_revdb.buf_limit) \
rpy_reverse_db_fetch(__FILE__, __LINE__); \
variable = _e; \
diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py
--- a/rpython/translator/revdb/test/test_basic.py
+++ b/rpython/translator/revdb/test/test_basic.py
@@ -25,7 +25,7 @@
assert header == 'RevDB:\t' + '\t'.join(expected_argv) + '\n\x00'
#
x = self.read1('P'); assert x == 0x00FF0003
- x = self.read1('P'); assert x == 0
+ x = self.read1('P'); self.main_thread_id = x
x = self.read1('P'); assert x == 0
x = self.read1('P'); #assert x == &rpy_reverse_db_stop_point
x = self.read1('P'); #assert x == &rpy_revdb
@@ -33,7 +33,6 @@
self.argc = self.read1('i')
self.argv = self.read1('P')
self.current_packet_end = self.cur
- self.main_thread_id = self.switch_thread()
self.read_check_argv(expected_argv)
def read1(self, mode):
@@ -91,16 +90,16 @@
def write_call(self, expected_string):
x = self.next() # raw_malloc: the pointer we got
+ self.gil_release()
self.same_stack() # write
x = self.next(); assert x == len(expected_string)
self.same_stack() # errno
x = self.next('i'); assert x == 0 # errno
- self.gil_acquire()
def same_stack(self):
x = self.next('c'); assert x == '\xFC'
- def gil_acquire(self):
+ def gil_release(self):
x = self.next('c'); assert x == '\xFD'
def switch_thread(self, expected=None):
diff --git a/rpython/translator/revdb/test/test_callback.py b/rpython/translator/revdb/test/test_callback.py
--- a/rpython/translator/revdb/test/test_callback.py
+++ b/rpython/translator/revdb/test/test_callback.py
@@ -63,19 +63,19 @@
self.compile(main, backendopt=False)
out = self.run('Xx')
rdb = self.fetch_rdb([self.exename, 'Xx'])
+ rdb.gil_release()
rdb.same_stack() # callmesimple()
x = rdb.next('i'); assert x == 55555
- rdb.gil_acquire()
rdb.write_call('55555\n')
+ rdb.gil_release()
b = rdb.next('!h'); assert 300 <= b < 310 # -> callback
x = rdb.next('i'); assert x == 40 # arg n
- rdb.gil_acquire()
+ rdb.gil_release()
x = rdb.next('!h'); assert x == b # -> callback
x = rdb.next('i'); assert x == 3 # arg n
- rdb.gil_acquire()
+ rdb.gil_release()
rdb.same_stack() # <- return in main thread
x = rdb.next('i'); assert x == 4000 * 300 # return from callme()
- rdb.gil_acquire()
rdb.write_call('%s\n' % (4000 * 300,))
x = rdb.next('q'); assert x == 0 # number of stop points
assert rdb.done()
@@ -85,17 +85,17 @@
self.compile(main, backendopt=False)
out = self.run('Xx')
rdb = self.fetch_rdb([self.exename, 'Xx'])
+ rdb.gil_release()
b = rdb.next('!h'); assert 300 <= b < 310 # -> callback
x = rdb.next('i'); assert x == 40 # arg n
- rdb.gil_acquire()
rdb.write_call('40\n')
+ rdb.gil_release()
x = rdb.next('!h'); assert x == b # -> callback again
x = rdb.next('i'); assert x == 3 # arg n
- rdb.gil_acquire()
rdb.write_call('3\n')
+ rdb.gil_release()
rdb.same_stack() # -> return in main thread
x = rdb.next('i'); assert x == 120 # <- return from callme()
- rdb.gil_acquire()
rdb.write_call('120\n')
x = rdb.next('q'); assert x == 2 # number of stop points
assert rdb.done()
From pypy.commits at gmail.com Thu Aug 11 03:58:30 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 00:58:30 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Forgot to add the test file.
Message-ID: <57ac3026.c15e1c0a.1a675.2c00@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86134:678a96c2a2bf
Date: 2016-08-11 09:57 +0200
http://bitbucket.org/pypy/pypy/changeset/678a96c2a2bf/
Log: Forgot to add the test file.
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -748,7 +748,6 @@
'st_outer_controller_h'. This is the outer controller
loop.
*/
- attach_gdb();
while (1) {
struct replay_thread_s *node, **item, dummy;
diff --git a/rpython/translator/revdb/test/test_thread.py b/rpython/translator/revdb/test/test_thread.py
new file mode 100644
--- /dev/null
+++ b/rpython/translator/revdb/test/test_thread.py
@@ -0,0 +1,107 @@
+from rpython.translator.revdb.test.test_basic import BaseRecordingTests
+from rpython.translator.revdb.test.test_basic import InteractiveTests
+from rpython.rtyper.lltypesystem import rffi
+from rpython.rlib import rthread
+from rpython.rlib import revdb
+
+from rpython.translator.revdb.message import *
+
+
+_sleep = rffi.llexternal('sleep', [rffi.UINT], rffi.UINT)
+
+
+class TestThreadRecording(BaseRecordingTests):
+
+ def test_thread_simple(self):
+ def bootstrap():
+ rthread.gc_thread_start()
+ _sleep(1)
+ print "BB"
+ _sleep(2)
+ print "BBB"
+ rthread.gc_thread_die()
+
+ def main(argv):
+ print "A"
+ rthread.start_new_thread(bootstrap, ())
+ for i in range(2):
+ _sleep(2)
+ print "AAAA"
+ return 9
+
+ self.compile(main, backendopt=False, thread=True)
+ out = self.run('Xx')
+ # should have printed A, BB, AAAA, BBB, AAAA
+ rdb = self.fetch_rdb([self.exename, 'Xx'])
+ th_A = rdb.main_thread_id
+ rdb.write_call("A\n")
+ rdb.same_stack() # RPyGilAllocate()
+ rdb.gil_release()
+
+ th_B = rdb.switch_thread()
+ assert th_B != th_A
+ b = rdb.next('!h'); assert 300 <= b < 310 # "callback": start thread
+ rdb.gil_release()
+
+ rdb.switch_thread(th_A)
+ rdb.same_stack() # start_new_thread returns
+ x = rdb.next(); assert x == th_B # result is the 'th_B' id
+ rdb.gil_release()
+
+ rdb.switch_thread(th_B)
+ rdb.same_stack() # sleep()
+ rdb.next('i') # sleep()
+ rdb.write_call("BB\n")
+ rdb.gil_release()
+
+ rdb.switch_thread(th_A)
+ rdb.same_stack() # sleep()
+ rdb.next('i') # sleep()
+ rdb.write_call("AAAA\n")
+ rdb.gil_release()
+
+ rdb.switch_thread(th_B)
+ rdb.same_stack() # sleep()
+ rdb.next('i') # sleep()
+ rdb.write_call("BBB\n")
+ rdb.gil_release()
+
+ rdb.switch_thread(th_A)
+ rdb.same_stack() # sleep()
+ rdb.next('i') # sleep()
+ rdb.write_call("AAAA\n")
+ rdb.done()
+
+
+class TestThreadInteractive(InteractiveTests):
+ expected_stop_points = 5
+
+ def setup_class(cls):
+ from rpython.translator.revdb.test.test_basic import compile, run
+ def bootstrap():
+ rthread.gc_thread_start()
+ _sleep(1)
+ revdb.stop_point()
+ _sleep(2)
+ revdb.stop_point()
+ rthread.gc_thread_die()
+
+ def main(argv):
+ revdb.stop_point()
+ rthread.start_new_thread(bootstrap, ())
+ for i in range(2):
+ _sleep(2)
+ revdb.stop_point()
+ print "ok"
+ return 9
+
+ compile(cls, main, backendopt=False, thread=True)
+ assert run(cls, '') == 'ok\n'
+
+ def test_go(self):
+ child = self.replay()
+ for i in range(2, 6):
+ child.send(Message(CMD_FORWARD, 1))
+ child.expect(ANSWER_READY, i, Ellipsis)
+ child.send(Message(CMD_FORWARD, 1))
+ child.expect(ANSWER_AT_END)
From pypy.commits at gmail.com Thu Aug 11 04:02:56 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 01:02:56 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: fix test
Message-ID: <57ac3130.4219c20a.21e8e.69fa@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86135:6a05c5fb0a03
Date: 2016-08-11 10:02 +0200
http://bitbucket.org/pypy/pypy/changeset/6a05c5fb0a03/
Log: fix test
diff --git a/rpython/translator/revdb/test/test_weak.py b/rpython/translator/revdb/test/test_weak.py
--- a/rpython/translator/revdb/test/test_weak.py
+++ b/rpython/translator/revdb/test/test_weak.py
@@ -203,6 +203,7 @@
assert time == i + 1
y = intmask(rdb.next('q')); assert y == -1
triggered = True
+ rdb.gil_release()
rdb.same_stack()
j = rdb.next()
assert j == i + 1000000 * triggered
@@ -215,6 +216,7 @@
assert uid > 0 and uid not in uid_seen
uid_seen.add(uid)
lst.append(uid)
+ rdb.gil_release()
rdb.same_stack()
totals.append((lst, intmask(rdb.next())))
x = rdb.next('q'); assert x == 3000 # number of stop points
From pypy.commits at gmail.com Thu Aug 11 04:06:22 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 01:06:22 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: trying
Message-ID: <57ac31fe.031dc20a.de9d1.6eff@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86136:0140e350104d
Date: 2016-08-11 10:05 +0200
http://bitbucket.org/pypy/pypy/changeset/0140e350104d/
Log: trying
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -56,7 +56,7 @@
])
reverse_debugger_disable_modules = set([
- "thread", "_continuation", "_vmprof", "_multiprocessing",
+ "_continuation", "_vmprof", "_multiprocessing",
])
# XXX this should move somewhere else, maybe to platform ("is this posixish"
From pypy.commits at gmail.com Thu Aug 11 04:46:20 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 01:46:20 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Careful,
we need the gil_acquire byte as well: otherwise effects
Message-ID: <57ac3b5c.09afc20a.90904.7b91@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86137:8ac394503ebe
Date: 2016-08-11 10:45 +0200
http://bitbucket.org/pypy/pypy/changeset/8ac394503ebe/
Log: Careful, we need the gil_acquire byte as well: otherwise effects
like writing to GC objects might occur at the wrong time
diff --git a/rpython/translator/revdb/gencsupp.py b/rpython/translator/revdb/gencsupp.py
--- a/rpython/translator/revdb/gencsupp.py
+++ b/rpython/translator/revdb/gencsupp.py
@@ -83,18 +83,19 @@
return call_code # a hack for ll_call_destructor() to mean
# that the calls should really be done
#
- # hack: we don't need the flag for at least this common function
- if call_code == 'RPyGilAcquire();':
- return 'RPY_REVDB_CALL_GILCTRL(%s);' % (call_code,)
- if call_code == 'RPyGilRelease();':
+ if call_code in ('RPyGilAcquire();', 'RPyGilRelease();'):
# Could also work with a regular RPY_REVDB_CALL_VOID, but we
- # use a different byte (0xFD instead of 0xFC) to detect more
- # sync misses. In a single-threaded environment this 0xFD
+ # use a different byte (0xFD, 0xFE instead of 0xFC) to detect more
+ # sync misses. In a single-threaded environment this 0xFD or 0xFE
# byte is not needed at all, but in a multi-threaded
# environment it ensures that during replaying, just after
- # reading the 0xFD, we switch to a different thread if needed
+ # reading the 0xFD or 0xFE, we switch to a different thread if needed
# (actually implemented with stacklets).
- return 'RPY_REVDB_CALL_GIL(%s);' % (call_code,)
+ if call_code == 'RPyGilAcquire();':
+ byte = '0xFD'
+ else:
+ byte = '0xFE'
+ return 'RPY_REVDB_CALL_GIL(%s, %s);' % (call_code, byte)
#
tp = funcgen.lltypename(v_result)
if tp == 'void @':
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -57,8 +57,8 @@
static char rpy_rev_buffer[16384]; /* max. 32768 */
int rpy_rev_fileno = -1;
static char flag_io_disabled = FID_REGULAR_MODE;
-__thread bool_t rpy_active_thread;
-static bool_t *rpy_active_thread_ptr;
+__thread int rpy_active_thread;
+static int *rpy_active_thread_ptr;
static void setup_record_mode(int argc, char *argv[]);
@@ -1680,7 +1680,8 @@
RPY_EXTERN
void rpy_reverse_db_bad_acquire_gil(void)
{
- fprintf(stderr, "out of sync: unexpected byte in log (at acquire_gil)\n");
+ fprintf(stderr, "out of sync: unexpected byte in log "
+ " (at acquire_gil or release_gil)\n");
exit(1);
}
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -15,7 +15,7 @@
# error "explicit RPY_RDB_REPLAY: not really supported"
#endif
bool_t watch_enabled;
- long lock;
+ int lock;
char *buf_p, *buf_limit, *buf_readend;
uint64_t stop_point_seen, stop_point_break;
uint64_t unique_id_seen, unique_id_break;
@@ -23,7 +23,7 @@
RPY_EXTERN rpy_revdb_t rpy_revdb;
RPY_EXTERN int rpy_rev_fileno;
-RPY_EXTERN __thread bool_t rpy_active_thread;
+RPY_EXTERN __thread int rpy_active_thread;
/* ------------------------------------------------------------ */
@@ -66,11 +66,15 @@
/* Acquire/release the lock around EMIT_RECORD, because it may be
called without holding the GIL. Note that we're always
single-threaded during replaying: the lock is only useful during
- recording. */
+ recording.
+
+ Implementation trick: use 'a >= b' to mean 'a || !b' (the two
+ variables can only take the values 0 or 1).
+*/
#define _RPY_REVDB_LOCK() \
{ \
- bool_t _lock_contention = pypy_lock_test_and_set(&rpy_revdb.lock, 1); \
- if (_lock_contention || !rpy_active_thread) \
+ int _lock_contention = pypy_lock_test_and_set(&rpy_revdb.lock, 1); \
+ if (_lock_contention >= rpy_active_thread) \
rpy_reverse_db_lock_acquire(_lock_contention); \
}
#define _RPY_REVDB_UNLOCK() \
@@ -143,17 +147,17 @@
rpy_reverse_db_invoke_callback(_re); \
}
-#define RPY_REVDB_CALL_GIL(call_code) \
+#define RPY_REVDB_CALL_GIL(call_code, byte) \
if (!RPY_RDB_REPLAY) { \
call_code \
_RPY_REVDB_LOCK(); \
- _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, 0xFD) \
+ _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, byte) \
_RPY_REVDB_UNLOCK(); \
} \
else { \
unsigned char _re; \
_RPY_REVDB_EMIT_REPLAY(unsigned char _e, _re) \
- if (_re != 0xFD) \
+ if (_re != byte) \
rpy_reverse_db_bad_acquire_gil(); \
}
diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py
--- a/rpython/translator/revdb/test/test_basic.py
+++ b/rpython/translator/revdb/test/test_basic.py
@@ -95,12 +95,16 @@
x = self.next(); assert x == len(expected_string)
self.same_stack() # errno
x = self.next('i'); assert x == 0 # errno
+ self.gil_acquire()
def same_stack(self):
x = self.next('c'); assert x == '\xFC'
+ def gil_acquire(self):
+ x = self.next('c'); assert x == '\xFD'
+
def gil_release(self):
- x = self.next('c'); assert x == '\xFD'
+ x = self.next('c'); assert x == '\xFE'
def switch_thread(self, expected=None):
th, = self.special_packet(ASYNC_THREAD_SWITCH, 'q')
diff --git a/rpython/translator/revdb/test/test_callback.py b/rpython/translator/revdb/test/test_callback.py
--- a/rpython/translator/revdb/test/test_callback.py
+++ b/rpython/translator/revdb/test/test_callback.py
@@ -66,16 +66,20 @@
rdb.gil_release()
rdb.same_stack() # callmesimple()
x = rdb.next('i'); assert x == 55555
+ rdb.gil_acquire()
rdb.write_call('55555\n')
rdb.gil_release()
b = rdb.next('!h'); assert 300 <= b < 310 # -> callback
x = rdb.next('i'); assert x == 40 # arg n
+ rdb.gil_acquire()
rdb.gil_release()
x = rdb.next('!h'); assert x == b # -> callback
x = rdb.next('i'); assert x == 3 # arg n
+ rdb.gil_acquire()
rdb.gil_release()
rdb.same_stack() # <- return in main thread
x = rdb.next('i'); assert x == 4000 * 300 # return from callme()
+ rdb.gil_acquire()
rdb.write_call('%s\n' % (4000 * 300,))
x = rdb.next('q'); assert x == 0 # number of stop points
assert rdb.done()
@@ -88,14 +92,17 @@
rdb.gil_release()
b = rdb.next('!h'); assert 300 <= b < 310 # -> callback
x = rdb.next('i'); assert x == 40 # arg n
+ rdb.gil_acquire()
rdb.write_call('40\n')
rdb.gil_release()
x = rdb.next('!h'); assert x == b # -> callback again
x = rdb.next('i'); assert x == 3 # arg n
+ rdb.gil_acquire()
rdb.write_call('3\n')
rdb.gil_release()
rdb.same_stack() # -> return in main thread
x = rdb.next('i'); assert x == 120 # <- return from callme()
+ rdb.gil_acquire()
rdb.write_call('120\n')
x = rdb.next('q'); assert x == 2 # number of stop points
assert rdb.done()
diff --git a/rpython/translator/revdb/test/test_thread.py b/rpython/translator/revdb/test/test_thread.py
--- a/rpython/translator/revdb/test/test_thread.py
+++ b/rpython/translator/revdb/test/test_thread.py
@@ -41,34 +41,40 @@
th_B = rdb.switch_thread()
assert th_B != th_A
b = rdb.next('!h'); assert 300 <= b < 310 # "callback": start thread
+ rdb.gil_acquire()
rdb.gil_release()
rdb.switch_thread(th_A)
rdb.same_stack() # start_new_thread returns
x = rdb.next(); assert x == th_B # result is the 'th_B' id
+ rdb.gil_acquire()
rdb.gil_release()
rdb.switch_thread(th_B)
- rdb.same_stack() # sleep()
+ rdb.same_stack() # sleep() (finishes here)
rdb.next('i') # sleep()
+ rdb.gil_acquire()
rdb.write_call("BB\n")
rdb.gil_release()
rdb.switch_thread(th_A)
rdb.same_stack() # sleep()
rdb.next('i') # sleep()
+ rdb.gil_acquire()
rdb.write_call("AAAA\n")
rdb.gil_release()
rdb.switch_thread(th_B)
rdb.same_stack() # sleep()
rdb.next('i') # sleep()
+ rdb.gil_acquire()
rdb.write_call("BBB\n")
rdb.gil_release()
rdb.switch_thread(th_A)
rdb.same_stack() # sleep()
rdb.next('i') # sleep()
+ rdb.gil_acquire()
rdb.write_call("AAAA\n")
rdb.done()
diff --git a/rpython/translator/revdb/test/test_weak.py b/rpython/translator/revdb/test/test_weak.py
--- a/rpython/translator/revdb/test/test_weak.py
+++ b/rpython/translator/revdb/test/test_weak.py
@@ -204,8 +204,9 @@
y = intmask(rdb.next('q')); assert y == -1
triggered = True
rdb.gil_release()
- rdb.same_stack()
- j = rdb.next()
+ rdb.same_stack() #
+ j = rdb.next() # call to foobar()
+ rdb.gil_acquire()
assert j == i + 1000000 * triggered
if triggered:
lst = []
@@ -217,8 +218,9 @@
uid_seen.add(uid)
lst.append(uid)
rdb.gil_release()
- rdb.same_stack()
- totals.append((lst, intmask(rdb.next())))
+ rdb.same_stack() #
+ totals.append((lst, intmask(rdb.next()))) # call to foobar()
+ rdb.gil_acquire()
x = rdb.next('q'); assert x == 3000 # number of stop points
#
assert 1500 <= len(uid_seen) <= 3000
From pypy.commits at gmail.com Thu Aug 11 06:45:21 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Thu, 11 Aug 2016 03:45:21 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: several changes for translation
Message-ID: <57ac5741.c15e1c0a.1a675.7043@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async
Changeset: r86138:3c775a43b9af
Date: 2016-08-11 12:44 +0200
http://bitbucket.org/pypy/pypy/changeset/3c775a43b9af/
Log: several changes for translation renamed argument name in
Python.asdl, will resolve an issue for the new type singleton which
is an object correctly emitting from/to_object for singleton and
bytes type check for generator added commented old validation code
for starargs varargannotation ...
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -16,7 +16,7 @@
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
raise oefmt(space.w_TypeError,
- "AST string must be of type str or unicode")
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
@@ -2568,7 +2568,7 @@
def to_object(self, space):
w_node = space.call_function(get(space).w_Bytes)
- w_s = self.s.to_object(space) # bytes
+ w_s = self.s # bytes
space.setattr(w_node, space.wrap('s'), w_s)
w_lineno = space.wrap(self.lineno) # int
space.setattr(w_node, space.wrap('lineno'), w_lineno)
@@ -2581,7 +2581,7 @@
w_s = get_field(space, w_node, 's', False)
w_lineno = get_field(space, w_node, 'lineno', False)
w_col_offset = get_field(space, w_node, 'col_offset', False)
- _s = bytes.from_object(space, w_s)
+ _s = check_string(space, w_s)
if _s is None:
raise_required_value(space, w_node, 's')
_lineno = space.int_w(w_lineno)
@@ -2593,8 +2593,8 @@
class NameConstant(expr):
- def __init__(self, value, lineno, col_offset):
- self.value = value
+ def __init__(self, single, lineno, col_offset):
+ self.single = single
expr.__init__(self, lineno, col_offset)
def walkabout(self, visitor):
@@ -2605,8 +2605,8 @@
def to_object(self, space):
w_node = space.call_function(get(space).w_NameConstant)
- w_value = self.value.to_object(space) # singleton
- space.setattr(w_node, space.wrap('value'), w_value)
+ w_single = self.single # singleton
+ space.setattr(w_node, space.wrap('single'), w_single)
w_lineno = space.wrap(self.lineno) # int
space.setattr(w_node, space.wrap('lineno'), w_lineno)
w_col_offset = space.wrap(self.col_offset) # int
@@ -2615,17 +2615,17 @@
@staticmethod
def from_object(space, w_node):
- w_value = get_field(space, w_node, 'value', False)
+ w_single = get_field(space, w_node, 'single', False)
w_lineno = get_field(space, w_node, 'lineno', False)
w_col_offset = get_field(space, w_node, 'col_offset', False)
- _value = singleton.from_object(space, w_value)
- if _value is None:
- raise_required_value(space, w_node, 'value')
+ _single = w_single
+ if _single is None:
+ raise_required_value(space, w_node, 'single')
_lineno = space.int_w(w_lineno)
_col_offset = space.int_w(w_col_offset)
- return NameConstant(_value, _lineno, _col_offset)
-
-State.ast_type('NameConstant', 'expr', ['value'])
+ return NameConstant(_single, _lineno, _col_offset)
+
+State.ast_type('NameConstant', 'expr', ['single'])
class Ellipsis(expr):
@@ -2952,8 +2952,8 @@
class Const(expr):
- def __init__(self, value, lineno, col_offset):
- self.value = value
+ def __init__(self, obj, lineno, col_offset):
+ self.obj = obj
expr.__init__(self, lineno, col_offset)
def walkabout(self, visitor):
@@ -2964,8 +2964,8 @@
def to_object(self, space):
w_node = space.call_function(get(space).w_Const)
- w_value = self.value # object
- space.setattr(w_node, space.wrap('value'), w_value)
+ w_obj = self.obj # object
+ space.setattr(w_node, space.wrap('obj'), w_obj)
w_lineno = space.wrap(self.lineno) # int
space.setattr(w_node, space.wrap('lineno'), w_lineno)
w_col_offset = space.wrap(self.col_offset) # int
@@ -2974,17 +2974,17 @@
@staticmethod
def from_object(space, w_node):
- w_value = get_field(space, w_node, 'value', False)
+ w_obj = get_field(space, w_node, 'obj', False)
w_lineno = get_field(space, w_node, 'lineno', False)
w_col_offset = get_field(space, w_node, 'col_offset', False)
- _value = w_value
- if _value is None:
- raise_required_value(space, w_node, 'value')
+ _obj = w_obj
+ if _obj is None:
+ raise_required_value(space, w_node, 'obj')
_lineno = space.int_w(w_lineno)
_col_offset = space.int_w(w_col_offset)
- return Const(_value, _lineno, _col_offset)
-
-State.ast_type('Const', 'expr', ['value'])
+ return Const(_obj, _lineno, _col_offset)
+
+State.ast_type('Const', 'expr', ['obj'])
class expr_context(AST):
diff --git a/pypy/interpreter/astcompiler/asthelpers.py b/pypy/interpreter/astcompiler/asthelpers.py
--- a/pypy/interpreter/astcompiler/asthelpers.py
+++ b/pypy/interpreter/astcompiler/asthelpers.py
@@ -152,7 +152,7 @@
def as_node_list(self, space):
try:
- values_w = space.unpackiterable(self.value)
+ values_w = space.unpackiterable(self.obj)
except OperationError:
return None
line = self.lineno
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -1047,7 +1047,7 @@
def visit_Const(self, const):
self.update_position(const.lineno)
- self.load_const(const.value)
+ self.load_const(const.obj)
def visit_Ellipsis(self, e):
self.load_const(self.space.w_Ellipsis)
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -65,7 +65,7 @@
class __extend__(ast.Const):
def as_constant(self):
- return self.value
+ return self.obj
class __extend__(ast.Index):
def as_constant(self):
diff --git a/pypy/interpreter/astcompiler/tools/Python.asdl b/pypy/interpreter/astcompiler/tools/Python.asdl
--- a/pypy/interpreter/astcompiler/tools/Python.asdl
+++ b/pypy/interpreter/astcompiler/tools/Python.asdl
@@ -71,7 +71,8 @@
| Num(object n) -- a number as a PyObject.
| Str(string s) -- need to specify raw, unicode, etc?
| Bytes(bytes s)
- | NameConstant(singleton value)
+ -- PyPy mod. first argument name must not be value
+ | NameConstant(singleton single)
| Ellipsis
-- the following expression can appear in assignment context
@@ -83,7 +84,7 @@
| Tuple(expr* elts, expr_context ctx)
-- PyPy modification
- | Const(object value)
+ | Const(object obj)
-- col_offset is the byte offset in the utf8 string the parser uses
attributes (int lineno, int col_offset)
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -130,7 +130,7 @@
if field.opt:
wrapper += " if %s is not None else space.w_None" % (value,)
return wrapper
- elif field.type in ("object", "string"):
+ elif field.type in ("object", "singleton", "string", "bytes"):
return value
elif field.type in ("int", "bool"):
return "space.wrap(%s)" % (value,)
@@ -145,9 +145,9 @@
def get_value_extractor(self, field, value):
if field.type in self.data.simple_types:
return "%s.from_object(space, %s)" % (field.type, value)
- elif field.type in ("object",):
+ elif field.type in ("object","singleton"):
return value
- elif field.type in ("string",):
+ elif field.type in ("string","bytes"):
return "check_string(space, %s)" % (value,)
elif field.type in ("identifier",):
if field.opt:
diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py
--- a/pypy/interpreter/astcompiler/validate.py
+++ b/pypy/interpreter/astcompiler/validate.py
@@ -152,15 +152,15 @@
def visit_arguments(self, node):
self.visit_sequence(node.args)
- if node.varargannotation:
- if not node.vararg:
- raise ValidationError("varargannotation but no vararg on arguments")
- self._validate_expr(node.varargannotation)
+ # XXX py3.5 missing if node.varargannotation:
+ # XXX py3.5 missing if not node.vararg:
+ # XXX py3.5 missing raise ValidationError("varargannotation but no vararg on arguments")
+ # XXX py3.5 missing self._validate_expr(node.varargannotation)
self.visit_sequence(node.kwonlyargs)
- if node.kwargannotation:
- if not node.kwarg:
- raise ValidationError("kwargannotation but no kwarg on arguments")
- self._validate_expr(node.kwargannotation)
+ # XXX py3.5 missing if node.kwargannotation:
+ # XXX py3.5 missing if not node.kwarg:
+ # XXX py3.5 missing raise ValidationError("kwargannotation but no kwarg on arguments")
+ # XXX py3.5 missing self._validate_expr(node.kwargannotation)
if self._len(node.defaults) > self._len(node.args):
raise ValidationError("more positional defaults than args on arguments")
if self._len(node.kw_defaults) != self._len(node.kwonlyargs):
@@ -184,10 +184,10 @@
self._validate_exprs(node.bases)
self.visit_sequence(node.keywords)
self._validate_exprs(node.decorator_list)
- if node.starargs:
- self._validate_expr(node.starargs)
- if node.kwargs:
- self._validate_expr(node.kwargs)
+ # XXX py3.5 missing if node.starargs:
+ # XXX py3.5 missing self._validate_expr(node.starargs)
+ # XXX py3.5 missing if node.kwargs:
+ # XXX py3.5 missing self._validate_expr(node.kwargs)
def visit_Return(self, node):
if node.value:
@@ -373,10 +373,10 @@
self._validate_expr(node.func)
self._validate_exprs(node.args)
self.visit_sequence(node.keywords)
- if node.starargs:
- self._validate_expr(node.starargs)
- if node.kwargs:
- self._validate_expr(node.kwargs)
+ # XXX py3.5 missing if node.starargs:
+ # XXX py3.5 missing self._validate_expr(node.starargs)
+ # XXX py3.5 missing if node.kwargs:
+ # XXX py3.5 missing self._validate_expr(node.kwargs)
def visit_Num(self, node):
space = self.space
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -316,7 +316,8 @@
res = space.get_and_call_function(w_await, self)
if res is not None:
if (isinstance(res, Coroutine) or
- res.pycode.co_flags & consts.CO_ITERABLE_COROUTINE):
+ (isinstance(res, GeneratorIterator) and \
+ res.pycode.co_flags & consts.CO_ITERABLE_COROUTINE)):
raise oefmt(space.w_TypeError,
"__await__() returned a coroutine")
elif space.lookup(self, "__next__") is None:
From pypy.commits at gmail.com Thu Aug 11 10:15:18 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 07:15:18 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Restore usage of
RDBSignalActionFlag, which might have been removed by
Message-ID: <57ac8876.6aaac20a.437bd.03f9@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86139:260de9d411ea
Date: 2016-08-11 14:01 +0200
http://bitbucket.org/pypy/pypy/changeset/260de9d411ea/
Log: Restore usage of RDBSignalActionFlag, which might have been removed
by accident
diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py
--- a/pypy/module/signal/__init__.py
+++ b/pypy/module/signal/__init__.py
@@ -46,7 +46,10 @@
space.check_signal_action = interp_signal.CheckSignalAction(space)
space.actionflag.register_periodic_action(space.check_signal_action,
use_bytecode_counter=False)
- if not space.config.translation.reverse_debugger:
+ if space.config.translation.reverse_debugger:
+ from pypy.interpreter.reverse_debugging import RDBSignalActionFlag
+ space.actionflag.__class__ = RDBSignalActionFlag
+ else:
space.actionflag.__class__ = interp_signal.SignalActionFlag
# xxx yes I know the previous line is a hack
From pypy.commits at gmail.com Thu Aug 11 10:15:20 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 07:15:20 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Translation fixes
Message-ID: <57ac8878.2472c20a.3b6d9.05ff@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86140:c4ffe110cd17
Date: 2016-08-11 14:01 +0200
http://bitbucket.org/pypy/pypy/changeset/c4ffe110cd17/
Log: Translation fixes
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -131,7 +131,8 @@
_RPY_REVDB_UNLOCK();
}
-static void reverse_db_teardown(void)
+RPY_EXTERN
+void rpy_reverse_db_teardown(void)
{
uint64_t stop_points;
if (!RPY_RDB_REPLAY) {
@@ -142,8 +143,8 @@
rpy_reverse_db_fetch(), which has nothing more to fetch now */
rpy_revdb.buf_limit += 1;
}
- _RPY_REVDB_EMIT_L(stop_points = rpy_revdb.stop_point_seen; ,
- uint64_t _e, stop_points, /*must_lock=*/0);
+ _RPY_REVDB_EMIT_L(stop_points = rpy_revdb.stop_point_seen;,
+ uint64_t _e, stop_points);
if (!RPY_RDB_REPLAY) {
rpy_reverse_db_flush();
@@ -523,8 +524,7 @@
else
r->re_off_prev = 1; /* any number > 0 */
- _RPY_REVDB_EMIT_L(alive = WEAKREF_AFTERWARDS_DEAD;, char _e, alive,
- /*must_lock=*/0);
+ _RPY_REVDB_EMIT_L(alive = WEAKREF_AFTERWARDS_DEAD;, char _e, alive);
if (!RPY_RDB_REPLAY) {
_RPY_REVDB_UNLOCK();
@@ -571,8 +571,7 @@
WEAKREF_AFTERWARDS_ALIVE);
r->re_off_prev = recording_offset();
}
- _RPY_REVDB_EMIT_L(alive = WEAKREF_AFTERWARDS_DEAD;, char _e, alive,
- /*must_lock=*/0);
+ _RPY_REVDB_EMIT_L(alive = WEAKREF_AFTERWARDS_DEAD;, char _e, alive);
if (!RPY_RDB_REPLAY) {
_RPY_REVDB_UNLOCK();
@@ -684,7 +683,7 @@
m->entry_point(m->argc, m->argv);
/* main thread finished, program stops */
- reverse_db_teardown();
+ rpy_reverse_db_teardown();
/* unreachable */
abort();
@@ -730,7 +729,7 @@
{
if (!RPY_RDB_REPLAY) {
int exitcode = (int)entry_point(argc, argv);
- reverse_db_teardown();
+ rpy_reverse_db_teardown();
return exitcode;
}
else {
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -103,17 +103,21 @@
variable = _e; \
}
-#define _RPY_REVDB_EMIT_L(normal_code, decl_e, variable, must_lock) \
+#define _RPY_REVDB_EMIT_L(normal_code, decl_e, variable) \
if (!RPY_RDB_REPLAY) { \
normal_code \
- if (must_lock) _RPY_REVDB_LOCK(); \
_RPY_REVDB_EMIT_RECORD_L(decl_e, variable) \
- if (must_lock) _RPY_REVDB_UNLOCK(); \
} else \
_RPY_REVDB_EMIT_REPLAY(decl_e, variable)
#define RPY_REVDB_EMIT(normal_code, decl_e, variable) \
- _RPY_REVDB_EMIT_L(normal_code, decl_e, variable, 1)
+ if (!RPY_RDB_REPLAY) { \
+ normal_code \
+ _RPY_REVDB_LOCK(); \
+ _RPY_REVDB_EMIT_RECORD_L(decl_e, variable) \
+ _RPY_REVDB_UNLOCK(); \
+ } else \
+ _RPY_REVDB_EMIT_REPLAY(decl_e, variable)
#define RPY_REVDB_EMIT_VOID(normal_code) \
if (!RPY_RDB_REPLAY) { normal_code } else { }
From pypy.commits at gmail.com Thu Aug 11 10:18:54 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Thu, 11 Aug 2016 07:18:54 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: merged py3k changes,
	generatorentry_driver was used twice, that is not allowed
Message-ID: <57ac894e.8f8e1c0a.8df7f.1f77@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async
Changeset: r86141:ce8bb88f9458
Date: 2016-08-11 16:18 +0200
http://bitbucket.org/pypy/pypy/changeset/ce8bb88f9458/
Log: merged py3k changes; generatorentry_driver was used twice, which is
	not allowed
diff --git a/lib-python/3/test/test_hash.py b/lib-python/3/test/test_hash.py
--- a/lib-python/3/test/test_hash.py
+++ b/lib-python/3/test/test_hash.py
@@ -198,7 +198,7 @@
class StringlikeHashRandomizationTests(HashRandomizationTests):
if check_impl_detail(pypy=True):
- EMPTY_STRING_HASH = -1
+ EMPTY_STRING_HASH = -2
else:
EMPTY_STRING_HASH = 0
repr_ = None
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -326,6 +326,12 @@
"of type '%T'", res)
return res
+def get_printable_coroutine_location_genentry(bytecode):
+ return '%s ' % (bytecode.get_repr(),)
+coroutineentry_driver = jit.JitDriver(greens=['pycode'],
+ reds=['gen', 'w_arg', 'operr'],
+ get_printable_location = get_printable_coroutine_location_genentry,
+ name='coroutineentry')
class Coroutine(W_Root):
"A coroutine object."
@@ -511,8 +517,8 @@
pycode = self.pycode
if pycode is not None:
if jit.we_are_jitted() and should_not_inline(pycode):
- generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg,
- operr=operr, pycode=pycode)
+ coroutineentry_driver.jit_merge_point(gen=self, w_arg=w_arg,
+ operr=operr, pycode=pycode)
return self._send_ex(w_arg, operr)
def _send_ex(self, w_arg, operr):
diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py
--- a/pypy/module/cppyy/pythonify.py
+++ b/pypy/module/cppyy/pythonify.py
@@ -175,7 +175,7 @@
"__new__" : make_new(class_name),
}
pycppclass = metacpp(class_name, _drop_cycles(bases), d)
-
+
# cache result early so that the class methods can find the class itself
setattr(scope, final_class_name, pycppclass)
@@ -192,13 +192,10 @@
for dm_name in cppclass.get_datamember_names():
cppdm = cppclass.get_datamember(dm_name)
- # here, setattr() can not be used, because a data member can shadow one in
- # its base class, resulting in the __set__() of its base class being called
- # by setattr(); so, store directly on the dictionary
- pycppclass.__dict__[dm_name] = cppdm
+ setattr(pycppclass, dm_name, cppdm)
import cppyy
if cppyy._is_static(cppdm): # TODO: make this a method of cppdm
- metacpp.__dict__[dm_name] = cppdm
+ setattr(metacpp, dm_name, cppdm)
# the call to register will add back-end specific pythonizations and thus
# needs to run first, so that the generic pythonizations can use them
@@ -413,7 +410,7 @@
lib = cppyy._load_dictionary(name)
_loaded_dictionaries[name] = lib
return lib
-
+
def _init_pythonify():
# cppyy should not be loaded at the module level, as that will trigger a
# call to space.getbuiltinmodule(), which will cause cppyy to be loaded
diff --git a/pypy/module/cpyext/dictproxyobject.py b/pypy/module/cpyext/dictproxyobject.py
--- a/pypy/module/cpyext/dictproxyobject.py
+++ b/pypy/module/cpyext/dictproxyobject.py
@@ -1,67 +1,7 @@
-# Read-only proxy for mappings. PyPy does not have a separate type for
-# type.__dict__, so PyDictProxy_New has to use a custom read-only mapping.
-
-from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
-from pypy.interpreter.typedef import TypeDef, interp2app
+from pypy.objspace.std.dictproxyobject import W_DictProxyObject
from pypy.module.cpyext.api import cpython_api, build_type_checkers
from pypy.module.cpyext.pyobject import PyObject
-class W_DictProxyObject(W_Root):
- "Read-only proxy for mappings."
-
- def __init__(self, w_mapping):
- self.w_mapping = w_mapping
-
- def descr_len(self, space):
- return space.len(self.w_mapping)
-
- def descr_getitem(self, space, w_key):
- return space.getitem(self.w_mapping, w_key)
-
- def descr_contains(self, space, w_key):
- return space.contains(self.w_mapping, w_key)
-
- def descr_iter(self, space):
- return space.iter(self.w_mapping)
-
- def descr_str(self, space):
- return space.str(self.w_mapping)
-
- def descr_repr(self, space):
- return space.repr(self.w_mapping)
-
- @unwrap_spec(w_default=WrappedDefault(None))
- def get_w(self, space, w_key, w_default):
- return space.call_method(self.w_mapping, "get", w_key, w_default)
-
- def keys_w(self, space):
- return space.call_method(self.w_mapping, "keys")
-
- def values_w(self, space):
- return space.call_method(self.w_mapping, "values")
-
- def items_w(self, space):
- return space.call_method(self.w_mapping, "items")
-
- def copy_w(self, space):
- return space.call_method(self.w_mapping, "copy")
-
-W_DictProxyObject.typedef = TypeDef(
- 'mappingproxy',
- __len__=interp2app(W_DictProxyObject.descr_len),
- __getitem__=interp2app(W_DictProxyObject.descr_getitem),
- __contains__=interp2app(W_DictProxyObject.descr_contains),
- __iter__=interp2app(W_DictProxyObject.descr_iter),
- __str__=interp2app(W_DictProxyObject.descr_str),
- __repr__=interp2app(W_DictProxyObject.descr_repr),
- get=interp2app(W_DictProxyObject.get_w),
- keys=interp2app(W_DictProxyObject.keys_w),
- values=interp2app(W_DictProxyObject.values_w),
- items=interp2app(W_DictProxyObject.items_w),
- copy=interp2app(W_DictProxyObject.copy_w)
-)
-
PyDictProxy_Check, PyDictProxy_CheckExact = build_type_checkers(
"DictProxy", W_DictProxyObject)
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -417,8 +417,7 @@
init = """PyObject *mod = PyModule_Create(&moduledef);"""
if more_init:
init += more_init
- else:
- init += "\nreturn mod;"
+ init += "\nreturn mod;"
return import_module(space, name=modname, init=init, body=body,
w_include_dirs=w_include_dirs,
PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN)
diff --git a/pypy/module/cpyext/test/test_import_module.c b/pypy/module/cpyext/test/test_import_module.c
--- a/pypy/module/cpyext/test/test_import_module.c
+++ b/pypy/module/cpyext/test/test_import_module.c
@@ -1,17 +1,20 @@
#include "Python.h"
/* Initialize this module. */
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "test_import_module",
+ NULL,
+ -1,
+ NULL, NULL, NULL, NULL, NULL
+};
+
PyMODINIT_FUNC
-inittest_import_module(void)
+PyInit_test_import_module(void)
{
- PyObject *m, *d;
-
- m = Py_InitModule("test_import_module", NULL);
- if (m == NULL)
- return;
- d = PyModule_GetDict(m);
- if (d) {
- PyDict_SetItemString(d, "TEST", (PyObject *) Py_None);
- }
- /* No need to check the error here, the caller will do that */
+ PyObject* m = PyModule_Create(&moduledef);
+ if (m == NULL)
+ return NULL;
+ PyModule_AddObject(m, "TEST", (PyObject *) Py_None);
+ return m;
}
diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py
--- a/pypy/module/cpyext/test/test_number.py
+++ b/pypy/module/cpyext/test/test_number.py
@@ -1,5 +1,6 @@
from rpython.rtyper.lltypesystem import lltype
from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
class TestIterator(BaseApiTest):
def test_check(self, space, api):
@@ -63,7 +64,9 @@
assert 9 == space.unwrap(
api.PyNumber_InPlacePower(space.wrap(3), space.wrap(2), space.w_None))
- def test_PyNumber_Check(self):
+
+class AppTestCNumber(AppTestCpythonExtensionBase):
+ def test_PyNumber_Check(self):
mod = self.import_extension('foo', [
("test_PyNumber_Check", "METH_VARARGS",
'''
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -277,9 +277,41 @@
args->ob_type->tp_dict, "copy");
Py_INCREF(method);
return method;
- ''')])
+ '''),
+ ("get_type_dict", "METH_O",
+ '''
+ PyObject* value = args->ob_type->tp_dict;
+ if (value == NULL) value = Py_None;
+ Py_INCREF(value);
+ return value;
+ '''),
+ ])
obj = foo.new()
assert module.read_tp_dict(obj) == foo.fooType.copy
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
+ class A(object):
+ pass
+ obj = A()
+ d = module.get_type_dict(obj)
+ assert type(d) is dict
+ d["_some_attribute"] = 1
+ assert type(obj)._some_attribute == 1
+ del d["_some_attribute"]
+
+ d = module.get_type_dict(1)
+ assert type(d) is dict
+ try:
+ d["_some_attribute"] = 1
+ except TypeError: # on PyPy, int.__dict__ is really immutable
+ pass
+ else:
+ assert int._some_attribute == 1
+ del d["_some_attribute"]
def test_custom_allocation(self):
foo = self.import_module("foo")
@@ -348,6 +380,21 @@
api.Py_DecRef(ref)
+ def test_type_dict(self, space, api):
+ w_class = space.appexec([], """():
+ class A(object):
+ pass
+ return A
+ """)
+ ref = make_ref(space, w_class)
+
+ py_type = rffi.cast(PyTypeObjectPtr, ref)
+ w_dict = from_ref(space, py_type.c_tp_dict)
+ w_name = space.newunicode(u'a')
+ space.setitem(w_dict, w_name, space.wrap(1))
+ assert space.int_w(space.getattr(w_class, w_name)) == 1
+ space.delitem(w_dict, w_name)
+
def test_multiple_inheritance(self, space, api):
w_class = space.appexec([], """():
class A(object):
@@ -779,7 +826,7 @@
""", more_init="""
IntLike_Type.tp_flags |= Py_TPFLAGS_DEFAULT;
IntLike_Type.tp_as_number = &intlike_as_number;
- intlike_as_number.nb_bool = intlike_nb_nonzero;
+ intlike_as_number.nb_bool = intlike_nb_bool;
intlike_as_number.nb_int = intlike_nb_int;
PyType_Ready(&IntLike_Type);
""")
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -192,7 +192,7 @@
py_methoddescr.c_d_method = w_obj.ml
def classmethoddescr_realize(space, obj):
- # XXX NOT TESTED When is this ever called?
+ # XXX NOT TESTED When is this ever called?
method = rffi.cast(lltype.Ptr(PyMethodDef), obj)
w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
w_obj = space.allocate_instance(W_PyCClassMethodObject, w_type)
@@ -201,7 +201,7 @@
return w_obj
def methoddescr_realize(space, obj):
- # XXX NOT TESTED When is this ever called?
+ # XXX NOT TESTED When is this ever called?
method = rffi.cast(lltype.Ptr(PyMethodDef), obj)
w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
w_obj = space.allocate_instance(W_PyCMethodObject, w_type)
@@ -272,12 +272,12 @@
if len(slot_names) == 1:
if not getattr(pto, slot_names[0]):
setattr(pto, slot_names[0], slot_func_helper)
- elif (w_type.getname(space) in ('list', 'tuple') and
+ elif (w_type.getname(space) in ('list', 'tuple') and
slot_names[0] == 'c_tp_as_number'):
# XXX hack - hwo can we generalize this? The problem is method
# names like __mul__ map to more than one slot, and we have no
# convenient way to indicate which slots CPython have filled
- #
+ #
# We need at least this special case since Numpy checks that
# (list, tuple) do __not__ fill tp_as_number
pass
@@ -767,8 +767,8 @@
if w_obj.is_cpytype():
Py_DecRef(space, pto.c_tp_dict)
- w_dict = w_obj.getdict(space)
- pto.c_tp_dict = make_ref(space, w_dict)
+ w_dict = w_obj.getdict(space)
+ pto.c_tp_dict = make_ref(space, w_dict)
@cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL)
def PyType_IsSubtype(space, a, b):
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -541,6 +541,8 @@
t_ref = lltype.malloc(rffi.TIME_TP.TO, 1, flavor='raw')
t_ref[0] = tt
pbuf = c_localtime(t_ref)
+ rffi.setintfield(pbuf, "c_tm_year",
+ rffi.getintfield(pbuf, "c_tm_year") + 1900)
lltype.free(t_ref, flavor='raw')
if not pbuf:
raise OperationError(space.w_ValueError,
@@ -584,7 +586,7 @@
if rffi.getintfield(glob_buf, 'c_tm_wday') < -1:
raise oefmt(space.w_ValueError, "day of week out of range")
- rffi.setintfield(glob_buf, 'c_tm_year', y - 1900)
+ rffi.setintfield(glob_buf, 'c_tm_year', y)
rffi.setintfield(glob_buf, 'c_tm_mon',
rffi.getintfield(glob_buf, 'c_tm_mon') - 1)
rffi.setintfield(glob_buf, 'c_tm_wday',
@@ -648,7 +650,8 @@
t_ref[0] = seconds
p = c_localtime(t_ref)
if not p:
- raise oefmt(space.w_ValueError, "unconvertible time")
+ raise oefmt(space.w_OSError, "unconvertible time")
+ rffi.setintfield(p, "c_tm_year", rffi.getintfield(p, "c_tm_year") + 1900)
return _asctime(space, p)
# by now w_tup is an optional argument (and not *args)
@@ -677,7 +680,7 @@
w(getif(t_ref, 'c_tm_hour')),
w(getif(t_ref, 'c_tm_min')),
w(getif(t_ref, 'c_tm_sec')),
- w(getif(t_ref, 'c_tm_year') + 1900)]
+ w(getif(t_ref, 'c_tm_year'))]
return space.mod(w("%.3s %.3s%3d %.2d:%.2d:%.2d %d"),
space.newtuple(args))
@@ -715,7 +718,7 @@
lltype.free(t_ref, flavor='raw')
if not p:
- raise OperationError(space.w_ValueError, space.wrap(_get_error_msg()))
+ raise OperationError(space.w_OSError, space.wrap(_get_error_msg()))
return _tm_to_tuple(space, p)
def mktime(space, w_tup):
@@ -725,6 +728,7 @@
buf = _gettmarg(space, w_tup, allowNone=False)
rffi.setintfield(buf, "c_tm_wday", -1)
+ rffi.setintfield(buf, "c_tm_year", rffi.getintfield(buf, "c_tm_year") - 1900)
tt = c_mktime(buf)
# A return value of -1 does not necessarily mean an error, but tm_wday
# cannot remain set to -1 if mktime succeeds.
@@ -801,6 +805,8 @@
rffi.setintfield(buf_value, 'c_tm_isdst', -1)
elif rffi.getintfield(buf_value, 'c_tm_isdst') > 1:
rffi.setintfield(buf_value, 'c_tm_isdst', 1)
+ rffi.setintfield(buf_value, "c_tm_year",
+ rffi.getintfield(buf_value, "c_tm_year") - 1900)
if _WIN:
# check that the format string contains only valid directives
diff --git a/pypy/objspace/std/classdict.py b/pypy/objspace/std/classdict.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/classdict.py
@@ -0,0 +1,119 @@
+from rpython.rlib import rerased
+from rpython.rlib.objectmodel import iteritems_with_hash
+
+from pypy.interpreter.error import OperationError, oefmt
+from pypy.objspace.std.dictmultiobject import (
+ DictStrategy, create_iterator_classes)
+from pypy.objspace.std.typeobject import unwrap_cell
+
+
+class ClassDictStrategy(DictStrategy):
+ """Exposes a W_TypeObject.dict_w at app-level.
+
+ Uses getdictvalue() and setdictvalue() to access items.
+ """
+ erase, unerase = rerased.new_erasing_pair("dictproxy")
+ erase = staticmethod(erase)
+ unerase = staticmethod(unerase)
+
+ def getitem(self, w_dict, w_key):
+ space = self.space
+ w_lookup_type = space.type(w_key)
+ if space.issubtype_w(w_lookup_type, space.w_unicode):
+ return self.getitem_str(w_dict, space.str_w(w_key))
+ else:
+ return None
+
+ def getitem_str(self, w_dict, key):
+ return self.unerase(w_dict.dstorage).getdictvalue(self.space, key)
+
+ def setitem(self, w_dict, w_key, w_value):
+ space = self.space
+ if space.is_w(space.type(w_key), space.w_unicode):
+ self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
+ else:
+ raise oefmt(space.w_TypeError,
+ "cannot add non-string keys to dict of a type")
+
+ def setitem_str(self, w_dict, key, w_value):
+ w_type = self.unerase(w_dict.dstorage)
+ try:
+ w_type.setdictvalue(self.space, key, w_value)
+ except OperationError as e:
+ if not e.match(self.space, self.space.w_TypeError):
+ raise
+ if not w_type.is_cpytype():
+ raise
+ # Allow cpyext to write to type->tp_dict even in the case
+ # of a builtin type.
+ # Like CPython, we assume that this is only done early
+ # after the type is created, and we don't invalidate any
+ # cache. User code shoud call PyType_Modified().
+ w_type.dict_w[key] = w_value
+
+ def setdefault(self, w_dict, w_key, w_default):
+ w_result = self.getitem(w_dict, w_key)
+ if w_result is not None:
+ return w_result
+ self.setitem(w_dict, w_key, w_default)
+ return w_default
+
+ def delitem(self, w_dict, w_key):
+ space = self.space
+ w_key_type = space.type(w_key)
+ if space.is_w(w_key_type, space.w_unicode):
+ key = self.space.str_w(w_key)
+ if not self.unerase(w_dict.dstorage).deldictvalue(space, key):
+ raise KeyError
+ else:
+ raise KeyError
+
+ def length(self, w_dict):
+ return len(self.unerase(w_dict.dstorage).dict_w)
+
+ def w_keys(self, w_dict):
+ space = self.space
+ w_type = self.unerase(w_dict.dstorage)
+ return space.newlist([_wrapkey(space, key)
+ for key in w_type.dict_w.iterkeys()])
+
+ def values(self, w_dict):
+ return [unwrap_cell(self.space, w_value) for w_value in
+ self.unerase(w_dict.dstorage).dict_w.itervalues()]
+
+ def items(self, w_dict):
+ space = self.space
+ w_type = self.unerase(w_dict.dstorage)
+ return [space.newtuple([_wrapkey(space, key),
+ unwrap_cell(space, w_value)])
+ for (key, w_value) in w_type.dict_w.iteritems()]
+
+ def clear(self, w_dict):
+ space = self.space
+ w_type = self.unerase(w_dict.dstorage)
+ if not w_type.is_heaptype():
+ raise oefmt(space.w_TypeError,
+ "can't clear dictionary of type '%N'", w_type)
+ w_type.dict_w.clear()
+ w_type.mutated(None)
+
+ def getiterkeys(self, w_dict):
+ return self.unerase(w_dict.dstorage).dict_w.iterkeys()
+
+ def getitervalues(self, w_dict):
+ return self.unerase(w_dict.dstorage).dict_w.itervalues()
+
+ def getiteritems_with_hash(self, w_dict):
+ return iteritems_with_hash(self.unerase(w_dict.dstorage).dict_w)
+
+ def wrapkey(space, key):
+ return _wrapkey(space, key)
+
+ def wrapvalue(space, value):
+ return unwrap_cell(space, value)
+
+def _wrapkey(space, key):
+ # keys are utf-8 encoded identifiers from type's dict_w
+ return space.wrap(key.decode('utf-8'))
+
+create_iterator_classes(ClassDictStrategy)
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -1,212 +1,95 @@
-from rpython.rlib import rerased
-from rpython.rlib.objectmodel import iteritems_with_hash
+"""
+Read-only proxy for mappings.
-from pypy.interpreter.error import OperationError, oefmt
-from pypy.interpreter.gateway import interp2app
-from pypy.interpreter.typedef import TypeDef
-from pypy.objspace.std.dictmultiobject import (
- DictStrategy, W_DictObject, create_iterator_classes)
-from pypy.objspace.std.typeobject import unwrap_cell
+Its main use is as the return type of cls.__dict__.
+"""
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.error import oefmt
+from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
+from pypy.interpreter.typedef import TypeDef, interp2app
-class W_DictProxyObject(W_DictObject):
+class W_DictProxyObject(W_Root):
+ "Read-only proxy for mappings."
+
+ def __init__(self, w_mapping):
+ self.w_mapping = w_mapping
+
@staticmethod
def descr_new(space, w_type, w_mapping):
if (not space.lookup(w_mapping, "__getitem__") or
- space.isinstance_w(w_mapping, space.w_list) or
- space.isinstance_w(w_mapping, space.w_tuple)):
+ space.isinstance_w(w_mapping, space.w_list) or
+ space.isinstance_w(w_mapping, space.w_tuple)):
raise oefmt(space.w_TypeError,
- "mappingproxy() argument must be a mapping, not %T", w_mapping)
- strategy = space.fromcache(MappingProxyStrategy)
- storage = strategy.erase(w_mapping)
- w_obj = space.allocate_instance(W_DictProxyObject, w_type)
- W_DictProxyObject.__init__(w_obj, space, strategy, storage)
- return w_obj
+ "mappingproxy() argument must be a mapping, not %T",
+ w_mapping)
+ return W_DictProxyObject(w_mapping)
def descr_init(self, space, __args__):
pass
+ def descr_len(self, space):
+ return space.len(self.w_mapping)
+
+ def descr_getitem(self, space, w_key):
+ return space.getitem(self.w_mapping, w_key)
+
+ def descr_contains(self, space, w_key):
+ return space.contains(self.w_mapping, w_key)
+
+ def descr_iter(self, space):
+ return space.iter(self.w_mapping)
+
+ def descr_str(self, space):
+ return space.str(self.w_mapping)
+
def descr_repr(self, space):
- return space.wrap(u"mappingproxy(%s)" % (
- space.unicode_w(W_DictObject.descr_repr(self, space))))
+ return space.newunicode(u"mappingproxy(%s)" %
+ (space.unicode_w(space.repr(self.w_mapping)),))
+
+ @unwrap_spec(w_default=WrappedDefault(None))
+ def get_w(self, space, w_key, w_default):
+ return space.call_method(self.w_mapping, "get", w_key, w_default)
+
+ def keys_w(self, space):
+ return space.call_method(self.w_mapping, "keys")
+
+ def values_w(self, space):
+ return space.call_method(self.w_mapping, "values")
+
+ def items_w(self, space):
+ return space.call_method(self.w_mapping, "items")
+
+ def copy_w(self, space):
+ return space.call_method(self.w_mapping, "copy")
+
+cmp_methods = {}
+def make_cmp_method(op):
+ def descr_op(self, space, w_other):
+ return getattr(space, op)(self.w_mapping, w_other)
+ descr_name = 'descr_' + op
+ descr_op.__name__ = descr_name
+ setattr(W_DictProxyObject, descr_name, descr_op)
+ cmp_methods['__%s__' % op] = interp2app(getattr(W_DictProxyObject, descr_name))
+
+for op in ['eq', 'ne', 'gt', 'ge', 'lt', 'le']:
+ make_cmp_method(op)
+
W_DictProxyObject.typedef = TypeDef(
- "mappingproxy", W_DictObject.typedef,
- __new__ = interp2app(W_DictProxyObject.descr_new),
- __init__ = interp2app(W_DictProxyObject.descr_init),
- __repr__ = interp2app(W_DictProxyObject.descr_repr),
+ 'mappingproxy',
+ __new__=interp2app(W_DictProxyObject.descr_new),
+ __init__=interp2app(W_DictProxyObject.descr_init),
+ __len__=interp2app(W_DictProxyObject.descr_len),
+ __getitem__=interp2app(W_DictProxyObject.descr_getitem),
+ __contains__=interp2app(W_DictProxyObject.descr_contains),
+ __iter__=interp2app(W_DictProxyObject.descr_iter),
+ __str__=interp2app(W_DictProxyObject.descr_str),
+ __repr__=interp2app(W_DictProxyObject.descr_repr),
+ get=interp2app(W_DictProxyObject.get_w),
+ keys=interp2app(W_DictProxyObject.keys_w),
+ values=interp2app(W_DictProxyObject.values_w),
+ items=interp2app(W_DictProxyObject.items_w),
+ copy=interp2app(W_DictProxyObject.copy_w),
+ **cmp_methods
)
-
-
-class DictProxyStrategy(DictStrategy):
- """Exposes a W_TypeObject.dict_w at app-level.
-
- Uses getdictvalue() and setdictvalue() to access items.
- """
- erase, unerase = rerased.new_erasing_pair("dictproxy")
- erase = staticmethod(erase)
- unerase = staticmethod(unerase)
-
- def getitem(self, w_dict, w_key):
- space = self.space
- w_lookup_type = space.type(w_key)
- if space.issubtype_w(w_lookup_type, space.w_unicode):
- return self.getitem_str(w_dict, space.str_w(w_key))
- else:
- return None
-
- def getitem_str(self, w_dict, key):
- return self.unerase(w_dict.dstorage).getdictvalue(self.space, key)
-
- def setitem(self, w_dict, w_key, w_value):
- space = self.space
- if space.is_w(space.type(w_key), space.w_unicode):
- self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
- else:
- raise oefmt(space.w_TypeError,
- "cannot add non-string keys to dict of a type")
-
- def setitem_str(self, w_dict, key, w_value):
- w_type = self.unerase(w_dict.dstorage)
- try:
- w_type.setdictvalue(self.space, key, w_value)
- except OperationError as e:
- if not e.match(self.space, self.space.w_TypeError):
- raise
- if not w_type.is_cpytype():
- raise
- # Allow cpyext to write to type->tp_dict even in the case
- # of a builtin type.
- # Like CPython, we assume that this is only done early
- # after the type is created, and we don't invalidate any
- # cache. User code shoud call PyType_Modified().
- w_type.dict_w[key] = w_value
-
- def setdefault(self, w_dict, w_key, w_default):
- w_result = self.getitem(w_dict, w_key)
- if w_result is not None:
- return w_result
- self.setitem(w_dict, w_key, w_default)
- return w_default
-
- def delitem(self, w_dict, w_key):
- space = self.space
- w_key_type = space.type(w_key)
- if space.is_w(w_key_type, space.w_unicode):
- key = self.space.str_w(w_key)
- if not self.unerase(w_dict.dstorage).deldictvalue(space, key):
- raise KeyError
- else:
- raise KeyError
-
- def length(self, w_dict):
- return len(self.unerase(w_dict.dstorage).dict_w)
-
- def w_keys(self, w_dict):
- space = self.space
- w_type = self.unerase(w_dict.dstorage)
- return space.newlist([_wrapkey(space, key)
- for key in w_type.dict_w.iterkeys()])
-
- def values(self, w_dict):
- return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()]
-
- def items(self, w_dict):
- space = self.space
- w_type = self.unerase(w_dict.dstorage)
- return [space.newtuple([_wrapkey(space, key),
- unwrap_cell(space, w_value)])
- for (key, w_value) in w_type.dict_w.iteritems()]
-
- def clear(self, w_dict):
- space = self.space
- w_type = self.unerase(w_dict.dstorage)
- if not w_type.is_heaptype():
- raise oefmt(space.w_TypeError,
- "can't clear dictionary of type '%N'", w_type)
- w_type.dict_w.clear()
- w_type.mutated(None)
-
- def getiterkeys(self, w_dict):
- return self.unerase(w_dict.dstorage).dict_w.iterkeys()
- def getitervalues(self, w_dict):
- return self.unerase(w_dict.dstorage).dict_w.itervalues()
- def getiteritems_with_hash(self, w_dict):
- return iteritems_with_hash(self.unerase(w_dict.dstorage).dict_w)
- def wrapkey(space, key):
- return _wrapkey(space, key)
- def wrapvalue(space, value):
- return unwrap_cell(space, value)
-
-def _wrapkey(space, key):
- # keys are utf-8 encoded identifiers from type's dict_w
- return space.wrap(key.decode('utf-8'))
-
-create_iterator_classes(DictProxyStrategy)
-
-
-class MappingProxyStrategy(DictStrategy):
- """Wraps an applevel mapping in a read-only dictionary."""
- erase, unerase = rerased.new_erasing_pair("mappingproxy")
- erase = staticmethod(erase)
- unerase = staticmethod(unerase)
-
- def getitem(self, w_dict, w_key):
- try:
- return self.space.getitem(self.unerase(w_dict.dstorage), w_key)
- except OperationError as e:
- if not e.match(self.space, self.space.w_KeyError):
- raise
- return None
-
- def setitem(self, w_dict, w_key, w_value):
- raise oefmt(self.space.w_TypeError,
- "'%T' object does not support item assignment", w_dict)
-
- def delitem(self, w_dict, w_key):
- raise oefmt(self.space.w_TypeError,
- "'%T' object does not support item deletion", w_dict)
-
- def length(self, w_dict):
- return self.space.len_w(self.unerase(w_dict.dstorage))
-
- def getiterkeys(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "keys"))
-
- def getitervalues(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "values"))
-
- def getiteritems_with_hash(self, w_dict):
- return self.space.iter(
- self.space.call_method(self.unerase(w_dict.dstorage), "items"))
-
- @staticmethod
- def override_next_key(iterkeys):
- w_keys = iterkeys.iterator
- return iterkeys.space.next(w_keys)
-
- @staticmethod
- def override_next_value(itervalues):
- w_values = itervalues.iterator
- return itervalues.space.next(w_values)
-
- @staticmethod
- def override_next_item(iteritems):
- w_items = iteritems.iterator
- w_item = iteritems.space.next(w_items)
- w_key, w_value = iteritems.space.unpackiterable(w_item, 2)
- return w_key, w_value
-
- def clear(self, w_dict):
- raise oefmt(self.space.w_AttributeError, "clear")
-
- def copy(self, w_dict):
- return self.space.call_method(self.unerase(w_dict.dstorage), "copy")
-
-create_iterator_classes(
- MappingProxyStrategy,
- override_next_key=MappingProxyStrategy.override_next_key,
- override_next_value=MappingProxyStrategy.override_next_value,
- override_next_item=MappingProxyStrategy.override_next_item)
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -9,42 +9,20 @@
assert 'a' in NotEmpty.__dict__
assert 'a' in NotEmpty.__dict__.keys()
assert 'b' not in NotEmpty.__dict__
- NotEmpty.__dict__['b'] = 4
- assert NotEmpty.b == 4
- del NotEmpty.__dict__['b']
assert NotEmpty.__dict__.get("b") is None
+ raises(TypeError, "NotEmpty.__dict__['b'] = 4")
raises(TypeError, 'NotEmpty.__dict__[15] = "y"')
- raises(KeyError, 'del NotEmpty.__dict__[15]')
+ raises(TypeError, 'del NotEmpty.__dict__[15]')
- assert NotEmpty.__dict__.setdefault("string", 1) == 1
- assert NotEmpty.__dict__.setdefault("string", 2) == 1
- assert NotEmpty.string == 1
- raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)')
-
- def test_dictproxy_popitem(self):
- class A(object):
- a = 42
- seen = 0
- try:
- while True:
- key, value = A.__dict__.popitem()
- if key == 'a':
- assert value == 42
- seen += 1
- except KeyError:
- pass
- assert seen == 1
+ raises(AttributeError, 'NotEmpty.__dict__.setdefault')
def test_dictproxy_getitem(self):
class NotEmpty(object):
a = 1
assert 'a' in NotEmpty.__dict__
- class substr(str): pass
+ class substr(str):
+ pass
assert substr('a') in NotEmpty.__dict__
- # the following are only for py2
- ## assert u'a' in NotEmpty.__dict__
- ## assert NotEmpty.__dict__[u'a'] == 1
- ## assert u'\xe9' not in NotEmpty.__dict__
def test_dictproxyeq(self):
class a(object):
@@ -63,9 +41,9 @@
class a(object):
pass
s1 = repr(a.__dict__)
+ assert s1.startswith('mappingproxy({') and s1.endswith('})')
s2 = str(a.__dict__)
- assert s1 == s2
- assert s1.startswith('mappingproxy({') and s1.endswith('})')
+ assert s1 == 'mappingproxy(%s)' % s2
def test_immutable_dict_on_builtin_type(self):
raises(TypeError, "int.__dict__['a'] = 1")
@@ -100,4 +78,3 @@
class AppTestUserObjectMethodCache(AppTestUserObject):
spaceconfig = {"objspace.std.withmethodcachecounter": True}
-
diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py
--- a/pypy/objspace/std/test/test_obj.py
+++ b/pypy/objspace/std/test/test_obj.py
@@ -185,12 +185,12 @@
skip("cannot run this test as apptest")
for u in [u"", u"a", u"aa"]:
assert id(self.unwrap_wrap_unicode(u)) == id(u)
- s = str(u)
- assert id(self.unwrap_wrap_str(s)) == id(s)
+ s = u.encode()
+ assert id(self.unwrap_wrap_bytes(s)) == id(s)
#
- assert id('') == (256 << 4) | 11 # always
+ assert id(b'') == (256 << 4) | 11 # always
assert id(u'') == (257 << 4) | 11
- assert id('a') == (ord('a') << 4) | 11
+ assert id(b'a') == (ord('a') << 4) | 11
assert id(u'\u1234') == ((~0x1234) << 4) | 11
def test_id_of_tuples(self):
@@ -243,13 +243,13 @@
l = []
def add(s, u):
l.append(s)
- l.append(self.unwrap_wrap_str(s))
+ l.append(self.unwrap_wrap_bytes(s))
l.append(s[:1] + s[1:])
l.append(u)
l.append(self.unwrap_wrap_unicode(u))
l.append(u[:1] + u[1:])
for i in range(3, 18):
- add(str(i), unicode(i))
+ add(str(i).encode(), str(i))
add(b"s", u"s")
add(b"", u"")
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -970,7 +970,6 @@
raises(TypeError, setattr, list, 'foobar', 42)
raises(TypeError, delattr, dict, 'keys')
raises(TypeError, 'int.__dict__["a"] = 1')
- raises(TypeError, 'int.__dict__.clear()')
def test_nontype_in_mro(self):
class OldStyle:
@@ -1028,10 +1027,9 @@
pass
a = A()
+ d = A.__dict__
A.x = 1
- assert A.__dict__["x"] == 1
- A.__dict__['x'] = 5
- assert A.x == 5
+ assert d["x"] == 1
def test_we_already_got_one_1(self):
# Issue #2079: highly obscure: CPython complains if we say
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -4,8 +4,8 @@
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import (
Function, StaticMethod, ClassMethod, FunctionWithFixedCode)
-from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\
- descr_get_dict, dict_descr, Member, TypeDef
+from pypy.interpreter.typedef import (
+ weakref_descr, GetSetProperty, dict_descr, Member, TypeDef)
from pypy.interpreter.astcompiler.misc import mangle
from pypy.module.__builtin__ import abstractinst
@@ -344,7 +344,7 @@
def deldictvalue(self, space, key):
if self.lazyloaders:
self._cleanup_() # force un-lazification
- if not self.is_heaptype():
+ if not (self.is_heaptype() or self.is_cpytype()):
raise oefmt(space.w_TypeError,
"can't delete attributes on type object '%N'", self)
try:
@@ -483,14 +483,14 @@
self.getdictvalue(self.space, attr)
del self.lazyloaders
- def getdict(self, space): # returning a dict-proxy!
- from pypy.objspace.std.dictproxyobject import DictProxyStrategy
- from pypy.objspace.std.dictproxyobject import W_DictProxyObject
+ def getdict(self, space):
+ from pypy.objspace.std.classdict import ClassDictStrategy
+ from pypy.objspace.std.dictmultiobject import W_DictObject
if self.lazyloaders:
self._cleanup_() # force un-lazification
- strategy = space.fromcache(DictProxyStrategy)
+ strategy = space.fromcache(ClassDictStrategy)
storage = strategy.erase(self)
- return W_DictProxyObject(space, strategy, storage)
+ return W_DictObject(space, strategy, storage)
def is_heaptype(self):
return self.flag_heaptype
@@ -929,6 +929,13 @@
return space.newbool(
abstractinst.p_recursive_isinstance_type_w(space, w_inst, w_obj))
+def type_get_dict(space, w_cls):
+ from pypy.objspace.std.dictproxyobject import W_DictProxyObject
+ w_dict = w_cls.getdict(space)
+ if w_dict is None:
+ return space.w_None
+ return W_DictProxyObject(w_dict)
+
W_TypeObject.typedef = TypeDef("type",
__new__ = gateway.interp2app(descr__new__),
__name__ = GetSetProperty(descr_get__name__, descr_set__name__),
@@ -936,7 +943,7 @@
__bases__ = GetSetProperty(descr_get__bases__, descr_set__bases__),
__base__ = GetSetProperty(descr__base),
__mro__ = GetSetProperty(descr_get__mro__),
- __dict__ = GetSetProperty(descr_get_dict),
+ __dict__=GetSetProperty(type_get_dict),
__doc__ = GetSetProperty(descr__doc, descr_set__doc),
__dir__ = gateway.interp2app(descr__dir),
mro = gateway.interp2app(descr_mro),
From pypy.commits at gmail.com Thu Aug 11 10:36:49 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Thu, 11 Aug 2016 07:36:49 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: remove always_inline from function,
and change it to a class function
Message-ID: <57ac8d81.a111c20a.22d77.09e6@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async
Changeset: r86142:511f8024c517
Date: 2016-08-11 16:34 +0200
http://bitbucket.org/pypy/pypy/changeset/511f8024c517/
Log: remove always_inline from function, and change it to a class
function
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -53,18 +53,6 @@
else:
return " object";
- at always_inline
-def list_unpack_helper(frame, itemcount):
- space = frame.space
- w_sum = space.newlist([], sizehint=itemcount)
- for i in range(itemcount, 0, -1):
- w_item = frame.peekvalue(i-1)
- w_sum.extend(w_item)
- while itemcount != 0:
- frame.popvalue()
- itemcount -= 1
- return w_sum
-
opcodedesc = bytecode_spec.opcodedesc
HAVE_ARGUMENT = bytecode_spec.HAVE_ARGUMENT
@@ -1388,13 +1376,26 @@
itemcount -= 1
self.pushvalue(w_sum)
+ @jit.unroll_safe
+ def list_unpack_helper(frame, itemcount):
+ space = frame.space
+ w_sum = space.newlist([], sizehint=itemcount)
+ for i in range(itemcount, 0, -1):
+ w_item = frame.peekvalue(i-1)
+ w_sum.extend(w_item)
+ while itemcount != 0:
+ frame.popvalue()
+ itemcount -= 1
+ return w_sum
+
+
def BUILD_TUPLE_UNPACK(self, itemcount, next_instr):
- w_list = list_unpack_helper(self, itemcount)
+ w_list = self.list_unpack_helper(itemcount)
items = [w_obj for w_obj in w_list.getitems_unroll()]
self.pushvalue(self.space.newtuple(items))
def BUILD_LIST_UNPACK(self, itemcount, next_instr):
- w_sum = list_unpack_helper(self, itemcount)
+ w_sum = self.list_unpack_helper(itemcount)
self.pushvalue(w_sum)
def BUILD_MAP_UNPACK_WITH_CALL(self, itemcount, next_instr):
From pypy.commits at gmail.com Thu Aug 11 11:00:38 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Thu, 11 Aug 2016 08:00:38 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: add unroll_safe to opcode dispatch
function (loop inside)
Message-ID: <57ac9316.e2efc20a.985c1.0eeb@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async
Changeset: r86143:1e95da517f26
Date: 2016-08-11 16:59 +0200
http://bitbucket.org/pypy/pypy/changeset/1e95da517f26/
Log: add unroll_safe to opcode dispatch function (loop inside)
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1388,7 +1388,7 @@
itemcount -= 1
return w_sum
-
+ @jit.unroll_safe
def BUILD_TUPLE_UNPACK(self, itemcount, next_instr):
w_list = self.list_unpack_helper(itemcount)
items = [w_obj for w_obj in w_list.getitems_unroll()]
From pypy.commits at gmail.com Thu Aug 11 11:46:29 2016
From: pypy.commits at gmail.com (rlamy)
Date: Thu, 11 Aug 2016 08:46:29 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: hg merge default
Message-ID: <57ac9dd5.262ec20a.a0119.2967@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r86144:b053ff5c2d6d
Date: 2016-08-11 16:19 +0100
http://bitbucket.org/pypy/pypy/changeset/b053ff5c2d6d/
Log: hg merge default
diff too long, truncating to 2000 out of 4035 lines
diff --git a/include/PyPy.h b/include/PyPy.h
--- a/include/PyPy.h
+++ b/include/PyPy.h
@@ -2,7 +2,11 @@
#define _PYPY_H_
/* This header is meant to be included in programs that use PyPy as an
- embedded library. */
+ embedded library.
+
+ NOTE: this is deprecated. Instead, use cffi's embedding support:
+ http://cffi.readthedocs.org/en/latest/embedding.html
+*/
#ifdef __cplusplus
extern "C" {
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.7.0
+Version: 1.8.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.7.0"
-__version_info__ = (1, 7, 0)
+__version__ = "1.8.0"
+__version_info__ = (1, 8, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -42,7 +42,9 @@
# include
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
- typedef unsigned char _Bool;
+# ifndef __cplusplus
+ typedef unsigned char _Bool;
+# endif
# endif
#else
# include
@@ -59,7 +61,7 @@
#ifdef __cplusplus
# ifndef _Bool
-# define _Bool bool /* semi-hackish: C++ has no _Bool; bool is builtin */
+ typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
@@ -196,20 +198,6 @@
return NULL;
}
-_CFFI_UNUSED_FN
-static PyObject **_cffi_unpack_args(PyObject *args_tuple, Py_ssize_t expected,
- const char *fnname)
-{
- if (PyTuple_GET_SIZE(args_tuple) != expected) {
- PyErr_Format(PyExc_TypeError,
- "%.150s() takes exactly %zd arguments (%zd given)",
- fnname, expected, PyTuple_GET_SIZE(args_tuple));
- return NULL;
- }
- return &PyTuple_GET_ITEM(args_tuple, 0); /* pointer to the first item,
- the others follow */
-}
-
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.7.0"
+ "\ncompiled with cffi version: 1.8.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -519,12 +519,10 @@
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
- import warnings
- warnings.warn("%r has no values explicitly defined; next version "
- "will refuse to guess which integer type it is "
- "meant to be (unsigned/signed, int/long)"
- % self._get_c_name())
- smallest_value = largest_value = 0
+ raise api.CDefError("%r has no values explicitly defined: "
+ "refusing to guess which integer type it is "
+ "meant to be (unsigned/signed, int/long)"
+ % self._get_c_name())
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -275,6 +275,8 @@
def write_c_source_to_f(self, f, preamble):
self._f = f
prnt = self._prnt
+ if self.ffi._embedding is None:
+ prnt('#define Py_LIMITED_API')
#
# first the '#include' (actually done by inlining the file's content)
lines = self._rel_readlines('_cffi_include.h')
@@ -683,13 +685,11 @@
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
- prnt(' PyObject **aa;')
prnt()
- prnt(' aa = _cffi_unpack_args(args, %d, "%s");' % (len(rng), name))
- prnt(' if (aa == NULL)')
+ prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
+ name, len(rng), len(rng),
+ ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
- for i in rng:
- prnt(' arg%d = aa[%d];' % (i, i))
prnt()
#
for i, type in enumerate(tp.args):
@@ -862,6 +862,8 @@
enumfields = list(tp.enumfields())
for fldname, fldtype, fbitsize, fqual in enumfields:
fldtype = self._field_type(tp, fldname, fldtype)
+ self._check_not_opaque(fldtype,
+ "field '%s.%s'" % (tp.name, fldname))
# cname is None for _add_missing_struct_unions() only
op = OP_NOOP
if fbitsize >= 0:
@@ -911,6 +913,13 @@
first_field_index, c_fields))
self._seen_struct_unions.add(tp)
+ def _check_not_opaque(self, tp, location):
+ while isinstance(tp, model.ArrayType):
+ tp = tp.item
+ if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
+ raise TypeError(
+ "%s is of an opaque type (not declared in cdef())" % location)
+
def _add_missing_struct_unions(self):
# not very nice, but some struct declarations might be missing
# because they don't have any known C name. Check that they are
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -99,17 +99,24 @@
The garbage collectors used or implemented by PyPy are not based on
reference counting, so the objects are not freed instantly when they are no
-longer reachable. The most obvious effect of this is that files are not
+longer reachable. The most obvious effect of this is that files (and sockets, etc) are not
promptly closed when they go out of scope. For files that are opened for
writing, data can be left sitting in their output buffers for a while, making
the on-disk file appear empty or truncated. Moreover, you might reach your
OS's limit on the number of concurrently opened files.
-Fixing this is essentially impossible without forcing a
+If you are debugging a case where a file in your program is not closed
+properly, you can use the ``-X track-resources`` command line option. If it is
+given, a ``ResourceWarning`` is produced for every file and socket that the
+garbage collector closes. The warning will contain the stack trace of the
+position where the file or socket was created, to make it easier to see which
+parts of the program don't close files explicitly.
+
+Fixing this difference to CPython is essentially impossible without forcing a
reference-counting approach to garbage collection. The effect that you
get in CPython has clearly been described as a side-effect of the
implementation and not a language design decision: programs relying on
-this are basically bogus. It would anyway be insane to try to enforce
-this are basically bogus. It would be too strong a restriction to try to enforce
CPython's behavior in a language spec, given that it has no chance to be
adopted by Jython or IronPython (or any other port of Python to Java or
.NET).
@@ -134,7 +141,7 @@
Here are some more technical details. This issue affects the precise
time at which ``__del__`` methods are called, which
-is not reliable in PyPy (nor Jython nor IronPython). It also means that
+is not reliable or timely in PyPy (nor Jython nor IronPython). It also means that
**weak references** may stay alive for a bit longer than expected. This
makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
useful: they will appear to stay alive for a bit longer in PyPy, and
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -14,10 +14,9 @@
Defaults to 1/2 of your cache or ``4M``.
Small values (like 1 or 1KB) are useful for debugging.
-``PYPY_GC_NURSERY_CLEANUP``
- The interval at which nursery is cleaned up. Must
- be smaller than the nursery size and bigger than the
- biggest object we can allotate in the nursery.
+``PYPY_GC_NURSERY_DEBUG``
+ If set to non-zero, will fill nursery with garbage, to help
+ debugging.
``PYPY_GC_INCREMENT_STEP``
The size of memory marked during the marking step. Default is size of
@@ -62,3 +61,8 @@
use.
Values are ``0`` (off), ``1`` (on major collections) or ``2`` (also
on minor collections).
+
+``PYPY_GC_MAX_PINNED``
+ The maximal number of pinned objects at any point in time. Defaults
+ to a conservative value depending on nursery size and maximum object
+ size inside the nursery. Useful for debugging by setting it to 0.
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -2,6 +2,9 @@
pypy
======
+.. note: this is turned into a regular man page "pypy.1" by
+ doing "make man" in pypy/doc/
+
SYNOPSIS
========
@@ -48,6 +51,10 @@
-B
Disable writing bytecode (``.pyc``) files.
+-X track-resources
+ Produce a ``ResourceWarning`` whenever a file or socket is closed by the
+ garbage collector.
+
--version
Print the PyPy version.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -105,3 +105,26 @@
.. branch: ep2016sprint
Trying harder to make hash(-1) return -2, like it does on CPython
+
+.. branch: jitlog-exact-source-lines
+
+Log exact line positions in debug merge points.
+
+.. branch: null_byte_after_str
+
+Allocate all RPython strings with one extra byte, normally unused.
+It is used to hold a final zero in case we need some ``char *``
+representation of the string, together with checks like ``not
+can_move()`` or object pinning. Main new thing that this allows:
+``ffi.from_buffer(string)`` in CFFI. Additionally, and most
+importantly, CFFI calls that take directly a string as argument don't
+copy the string any more---this is like CFFI on CPython.
+
+.. branch: resource_warning
+
+Add a new command line option -X track-resources which will produce
+ResourceWarnings when the GC closes unclosed files and sockets.
+
+.. branch: cpyext-realloc
+
+Implement PyObject_Realloc
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -114,8 +114,15 @@
return getattr(space, name)(operand)
return do_fold
-def _fold_pow(space, left, right):
- return space.pow(left, right, space.w_None)
+def _fold_pow(space, w_left, w_right):
+ # don't constant-fold if "w_left" and "w_right" are integers and
+ # the estimated bit length of the power is unreasonably large
+ space.appexec([w_left, w_right], """(left, right):
+ if isinstance(left, (int, long)) and isinstance(right, (int, long)):
+ if left.bit_length() * right > 5000:
+ raise OverflowError
+ """)
+ return space.pow(w_left, w_right, space.w_None)
def _fold_not(space, operand):
return space.wrap(not space.is_true(operand))
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1307,3 +1307,22 @@
counts = self.count_instructions(source)
assert ops.BUILD_SET not in counts
assert ops.LOAD_CONST in counts
+
+ def test_dont_fold_huge_powers(self):
+ for source in (
+ "2 ** 3000", # not constant-folded: too big
+ "(-2) ** 3000",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER in counts
+
+ for source in (
+ "2 ** 2000", # constant-folded
+ "2 ** -3000",
+ "1.001 ** 3000",
+ "1 ** 3000.0",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER not in counts
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1801,6 +1801,40 @@
_warnings.warn(msg, warningcls, stacklevel=stacklevel)
""")
+ def resource_warning(self, w_msg, w_tb):
+ self.appexec([w_msg, w_tb],
+ """(msg, tb):
+ import sys
+ print >> sys.stderr, msg
+ if tb:
+ print >> sys.stderr, "Created at (most recent call last):"
+ print >> sys.stderr, tb
+ """)
+
+ def format_traceback(self):
+ # we need to disable track_resources before calling the traceback
+ # module. Else, it tries to open more files to format the traceback,
+ # the file constructor will call space.format_traceback etc., in an
+ # infinite recursion
+ flag = self.sys.track_resources
+ self.sys.track_resources = False
+ try:
+ return self.appexec([],
+ """():
+ import sys, traceback
+ # the "1" is because we don't want to show THIS code
+ # object in the traceback
+ try:
+ f = sys._getframe(1)
+ except ValueError:
+ # this happens if you call format_traceback at the very beginning
+ # of startup, when there is no bottom code object
+ return ''
+ return "".join(traceback.format_stack(f))
+ """)
+ finally:
+ self.sys.track_resources = flag
+
class AppExecCache(SpaceCache):
def build(cache, source):
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -209,6 +209,13 @@
self.check(['-c', 'pass'], {'PYTHONNOUSERSITE': '1'}, sys_argv=['-c'],
run_command='pass', **expected)
+ def test_track_resources(self, monkeypatch):
+ myflag = [False]
+ def pypy_set_track_resources(flag):
+ myflag[0] = flag
+ monkeypatch.setattr(sys, 'pypy_set_track_resources', pypy_set_track_resources, raising=False)
+ self.check(['-X', 'track-resources'], {}, sys_argv=[''], run_stdin=True)
+ assert myflag[0] == True
class TestInteraction:
"""
@@ -1152,4 +1159,3 @@
# assert it did not crash
finally:
sys.path[:] = old_sys_path
-
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -458,3 +458,28 @@
space.finish()
# assert that we reach this point without getting interrupted
# by the OperationError(NameError)
+
+ def test_format_traceback(self):
+ from pypy.tool.pytest.objspace import maketestobjspace
+ from pypy.interpreter.gateway import interp2app
+ #
+ def format_traceback(space):
+ return space.format_traceback()
+ #
+ space = maketestobjspace()
+ w_format_traceback = space.wrap(interp2app(format_traceback))
+ w_tb = space.appexec([w_format_traceback], """(format_traceback):
+ def foo():
+ return bar()
+ def bar():
+ return format_traceback()
+ return foo()
+ """)
+ tb = space.str_w(w_tb)
+ expected = '\n'.join([
+ ' File "?", line 6, in anonymous', # this is the appexec code object
+ ' File "?", line 3, in foo',
+ ' File "?", line 5, in bar',
+ ''
+ ])
+ assert tb == expected
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi, entrypoint
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.7.0"
+VERSION = "1.8.0"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -157,11 +157,13 @@
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
try:
+ keepalives = [None] * len(args_w) # None or strings
for i in range(len(args_w)):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
w_obj = args_w[i]
argtype = self.fargs[i]
- if argtype.convert_argument_from_object(data, w_obj):
+ if argtype.convert_argument_from_object(data, w_obj,
+ keepalives, i):
# argtype is a pointer type, and w_obj a list/tuple/str
mustfree_max_plus_1 = i + 1
@@ -177,9 +179,13 @@
if isinstance(argtype, W_CTypePointer):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
flag = get_mustfree_flag(data)
+ raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
if flag == 1:
- raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
lltype.free(raw_cdata, flavor='raw')
+ elif flag >= 4:
+ value = keepalives[i]
+ assert value is not None
+ rffi.free_nonmovingbuffer(value, raw_cdata, chr(flag))
lltype.free(buffer, flavor='raw')
keepalive_until_here(args_w)
return w_res
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -83,7 +83,7 @@
raise oefmt(space.w_TypeError, "cannot initialize cdata '%s'",
self.name)
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
self.convert_from_object(cdata, w_ob)
return False
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -16,8 +16,8 @@
class W_CTypePtrOrArray(W_CType):
- _attrs_ = ['ctitem', 'can_cast_anything', 'length']
- _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length']
+ _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
+ _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
length = -1
def __init__(self, space, size, extra, extra_position, ctitem,
@@ -30,6 +30,9 @@
# - for functions, it is the return type
self.ctitem = ctitem
self.can_cast_anything = could_cast_anything and ctitem.cast_anything
+ self.accept_str = (self.can_cast_anything or
+ (ctitem.is_primitive_integer and
+ ctitem.size == rffi.sizeof(lltype.Char)))
def is_unichar_ptr_or_array(self):
return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar)
@@ -72,9 +75,7 @@
pass
else:
self._convert_array_from_listview(cdata, space.listview(w_ob))
- elif (self.can_cast_anything or
- (self.ctitem.is_primitive_integer and
- self.ctitem.size == rffi.sizeof(lltype.Char))):
+ elif self.accept_str:
if not space.isinstance_w(w_ob, space.w_str):
raise self._convert_error("bytes or list or tuple", w_ob)
s = space.str_w(w_ob)
@@ -262,8 +263,16 @@
else:
return lltype.nullptr(rffi.CCHARP.TO)
- def _prepare_pointer_call_argument(self, w_init, cdata):
+ def _prepare_pointer_call_argument(self, w_init, cdata, keepalives, i):
space = self.space
+ if self.accept_str and space.isinstance_w(w_init, space.w_str):
+ # special case to optimize strings passed to a "char *" argument
+ value = w_init.str_w(space)
+ keepalives[i] = value
+ buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
+ rffi.cast(rffi.CCHARPP, cdata)[0] = buf
+ return ord(buf_flag) # 4, 5 or 6
+ #
if (space.isinstance_w(w_init, space.w_list) or
space.isinstance_w(w_init, space.w_tuple)):
length = space.int_w(space.len(w_init))
@@ -300,10 +309,11 @@
rffi.cast(rffi.CCHARPP, cdata)[0] = result
return 1
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag
result = (not isinstance(w_ob, cdataobj.W_CData) and
- self._prepare_pointer_call_argument(w_ob, cdata))
+ self._prepare_pointer_call_argument(w_ob, cdata,
+ keepalives, i))
if result == 0:
self.convert_from_object(cdata, w_ob)
set_mustfree_flag(cdata, result)
diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py
--- a/pypy/module/_cffi_backend/ffi_obj.py
+++ b/pypy/module/_cffi_backend/ffi_obj.py
@@ -353,7 +353,7 @@
'array.array' or numpy arrays."""
#
w_ctchara = newtype._new_chara_type(self.space)
- return func.from_buffer(self.space, w_ctchara, w_python_buffer)
+ return func._from_buffer(self.space, w_ctchara, w_python_buffer)
@unwrap_spec(w_arg=W_CData)
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -1,7 +1,8 @@
from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw
-from rpython.rlib.objectmodel import keepalive_until_here
+from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated
+from rpython.rlib import jit
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
@@ -132,17 +133,66 @@
raise oefmt(space.w_TypeError,
"needs 'char[]', got '%s'", w_ctype.name)
#
+ return _from_buffer(space, w_ctype, w_x)
+
+def _from_buffer(space, w_ctype, w_x):
buf = _fetch_as_read_buffer(space, w_x)
- try:
- _cdata = buf.get_raw_address()
- except ValueError:
- raise oefmt(space.w_TypeError,
- "from_buffer() got a '%T' object, which supports the "
- "buffer interface but cannot be rendered as a plain "
- "raw address on PyPy", w_x)
+ if space.isinstance_w(w_x, space.w_str):
+ _cdata = get_raw_address_of_string(space, w_x)
+ else:
+ try:
+ _cdata = buf.get_raw_address()
+ except ValueError:
+ raise oefmt(space.w_TypeError,
+ "from_buffer() got a '%T' object, which supports the "
+ "buffer interface but cannot be rendered as a plain "
+ "raw address on PyPy", w_x)
#
return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x)
+# ____________________________________________________________
+
+class RawBytes(object):
+ def __init__(self, string):
+ self.ptr = rffi.str2charp(string, track_allocation=False)
+ def __del__(self):
+ rffi.free_charp(self.ptr, track_allocation=False)
+
+class RawBytesCache(object):
+ def __init__(self, space):
+ from pypy.interpreter.baseobjspace import W_Root
+ from rpython.rlib import rweakref
+ self.wdict = rweakref.RWeakKeyDictionary(W_Root, RawBytes)
+
+ at jit.dont_look_inside
+def get_raw_address_of_string(space, w_x):
+ """Special case for ffi.from_buffer(string). Returns a 'char *' that
+ is valid as long as the string object is alive. Two calls to
+ ffi.from_buffer(same_string) are guaranteed to return the same pointer.
+ """
+ from rpython.rtyper.annlowlevel import llstr
+ from rpython.rtyper.lltypesystem.rstr import STR
+ from rpython.rtyper.lltypesystem import llmemory
+ from rpython.rlib import rgc
+
+ cache = space.fromcache(RawBytesCache)
+ rawbytes = cache.wdict.get(w_x)
+ if rawbytes is None:
+ data = space.str_w(w_x)
+ if we_are_translated() and not rgc.can_move(data):
+ lldata = llstr(data)
+ data_start = (llmemory.cast_ptr_to_adr(lldata) +
+ rffi.offsetof(STR, 'chars') +
+ llmemory.itemoffsetof(STR.chars, 0))
+ data_start = rffi.cast(rffi.CCHARP, data_start)
+ data_start[len(data)] = '\x00' # write the final extra null
+ return data_start
+ rawbytes = RawBytes(data)
+ cache.wdict.set(w_x, rawbytes)
+ return rawbytes.ptr
+
+# ____________________________________________________________
+
def unsafe_escaping_ptr_for_ptr_or_array(w_cdata):
if not w_cdata.ctype.is_nonfunc_pointer_or_array:
diff --git a/pypy/module/_cffi_backend/parse_c_type.py b/pypy/module/_cffi_backend/parse_c_type.py
--- a/pypy/module/_cffi_backend/parse_c_type.py
+++ b/pypy/module/_cffi_backend/parse_c_type.py
@@ -97,11 +97,8 @@
[rffi.INT], rffi.CCHARP)
def parse_c_type(info, input):
- p_input = rffi.str2charp(input)
- try:
+ with rffi.scoped_view_charp(input) as p_input:
res = ll_parse_c_type(info, p_input)
- finally:
- rffi.free_charp(p_input)
return rffi.cast(lltype.Signed, res)
NULL_CTX = lltype.nullptr(PCTX.TO)
@@ -130,15 +127,13 @@
return rffi.getintfield(src_ctx, 'c_num_types')
def search_in_globals(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_globals(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_globals(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
def search_in_struct_unions(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_struct_unions(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_struct_unions(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.7.0", ("This test_c.py file is for testing a version"
+assert __version__ == "1.8.0", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
@@ -3330,13 +3330,18 @@
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
- py.test.raises(TypeError, from_buffer, BCharA, b"foo")
+ p1 = from_buffer(BCharA, b"foo")
+ assert p1 == from_buffer(BCharA, b"foo")
+ import gc; gc.collect()
+ assert p1 == from_buffer(BCharA, b"foo")
py.test.raises(TypeError, from_buffer, BCharA, u+"foo")
try:
from __builtin__ import buffer
except ImportError:
pass
else:
+ # from_buffer(buffer(b"foo")) does not work, because it's not
+ # implemented on pypy; only from_buffer(b"foo") works.
py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo"))
py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo"))
try:
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -402,21 +402,20 @@
_WriteFile, ERROR_NO_SYSTEM_RESOURCES)
from rpython.rlib import rwin32
- charp = rffi.str2charp(buf)
- written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
- flavor='raw')
- try:
- result = _WriteFile(
- self.handle, rffi.ptradd(charp, offset),
- size, written_ptr, rffi.NULL)
+ with rffi.scoped_view_charp(buf) as charp:
+ written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
+ flavor='raw')
+ try:
+ result = _WriteFile(
+ self.handle, rffi.ptradd(charp, offset),
+ size, written_ptr, rffi.NULL)
- if (result == 0 and
- rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
- raise oefmt(space.w_ValueError,
- "Cannot send %d bytes over connection", size)
- finally:
- rffi.free_charp(charp)
- lltype.free(written_ptr, flavor='raw')
+ if (result == 0 and
+ rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
+ raise oefmt(space.w_ValueError,
+ "Cannot send %d bytes over connection", size)
+ finally:
+ lltype.free(written_ptr, flavor='raw')
def do_recv_string(self, space, buflength, maxlength):
from pypy.module._multiprocessing.interp_win32 import (
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -1,6 +1,7 @@
import sys, os
-import py
+import pytest
from pypy.tool.pytest.objspace import gettestobjspace
+from pypy.interpreter.gateway import interp2app
from rpython.tool.udir import udir
from rpython.rlib import rsocket
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -13,8 +14,6 @@
mod.w_socket = space.appexec([], "(): import _socket as m; return m")
mod.path = udir.join('fd')
mod.path.write('fo')
- mod.raises = py.test.raises # make raises available from app-level tests
- mod.skip = py.test.skip
def test_gethostname():
host = space.appexec([w_socket], "(_socket): return _socket.gethostname()")
@@ -42,7 +41,7 @@
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and not ipv6:
from pypy.interpreter.error import OperationError
- with py.test.raises(OperationError):
+ with pytest.raises(OperationError):
space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyaddr(host)")
continue
@@ -58,14 +57,14 @@
assert space.unwrap(port) == 25
# 1 arg version
if sys.version_info < (2, 4):
- py.test.skip("getservbyname second argument is not optional before python 2.4")
+ pytest.skip("getservbyname second argument is not optional before python 2.4")
port = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getservbyname(name)")
assert space.unwrap(port) == 25
def test_getservbyport():
if sys.version_info < (2, 4):
- py.test.skip("getservbyport does not exist before python 2.4")
+ pytest.skip("getservbyport does not exist before python 2.4")
port = 25
# 2 args version
name = space.appexec([w_socket, space.wrap(port)],
@@ -139,7 +138,7 @@
def test_pton_ntop_ipv4():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
tests = [
("123.45.67.89", "\x7b\x2d\x43\x59"),
("0.0.0.0", "\x00" * 4),
@@ -155,9 +154,9 @@
def test_ntop_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -176,9 +175,9 @@
def test_pton_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -197,7 +196,7 @@
assert space.unwrap(w_packed) == packed
def test_has_ipv6():
- py.test.skip("has_ipv6 is always True on PyPy for now")
+ pytest.skip("has_ipv6 is always True on PyPy for now")
res = space.appexec([w_socket], "(_socket): return _socket.has_ipv6")
assert space.unwrap(res) == socket.has_ipv6
@@ -231,7 +230,7 @@
def test_addr_raw_packet():
from pypy.module._socket.interp_socket import addr_as_object
if not hasattr(rsocket._c, 'sockaddr_ll'):
- py.test.skip("posix specific test")
+ pytest.skip("posix specific test")
# HACK: To get the correct interface number of lo, which in most cases is 1,
# but can be anything (i.e. 39), we need to call the libc function
# if_nametoindex to get the correct index
@@ -653,11 +652,11 @@
class AppTestNetlink:
def setup_class(cls):
if not hasattr(os, 'getpid'):
- py.test.skip("AF_NETLINK needs os.getpid()")
+ pytest.skip("AF_NETLINK needs os.getpid()")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_NETLINK')")
if not space.is_true(w_ok):
- py.test.skip("no AF_NETLINK on this platform")
+ pytest.skip("no AF_NETLINK on this platform")
cls.space = space
def test_connect_to_kernel_netlink_routing_socket(self):
@@ -673,11 +672,11 @@
class AppTestPacket:
def setup_class(cls):
if not hasattr(os, 'getuid') or os.getuid() != 0:
- py.test.skip("AF_PACKET needs to be root for testing")
+ pytest.skip("AF_PACKET needs to be root for testing")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_PACKET')")
if not space.is_true(w_ok):
- py.test.skip("no AF_PACKET on this platform")
+ pytest.skip("no AF_PACKET on this platform")
cls.space = space
def test_convert_between_tuple_and_sockaddr_ll(self):
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -146,7 +146,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
NPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
# set both server and client callbacks, because the context
@@ -158,7 +158,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def advertiseNPN_cb(s, data_ptr, len_ptr, args):
@@ -192,7 +192,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
ALPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
with rffi.scoped_str2charp(protos) as protos_buf:
@@ -204,7 +204,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def selectALPN_cb(s, out_ptr, outlen_ptr, client, client_len, args):
@@ -239,7 +239,7 @@
Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
bound on the entropy contained in string."""
- with rffi.scoped_str2charp(string) as buf:
+ with rffi.scoped_nonmovingbuffer(string) as buf:
libssl_RAND_add(buf, len(string), entropy)
def _RAND_bytes(space, n, pseudo):
diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py
--- a/pypy/module/cppyy/capi/builtin_capi.py
+++ b/pypy/module/cppyy/capi/builtin_capi.py
@@ -537,9 +537,8 @@
releasegil=ts_helper,
compilation_info=backend.eci)
def c_charp2stdstring(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2stdstring(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2stdstring(charp)
return result
_c_stdstring2stdstring = rffi.llexternal(
"cppyy_stdstring2stdstring",
diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py
--- a/pypy/module/cppyy/capi/cint_capi.py
+++ b/pypy/module/cppyy/capi/cint_capi.py
@@ -82,9 +82,8 @@
releasegil=ts_helper,
compilation_info=eci)
def c_charp2TString(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2TString(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2TString(charp)
return result
_c_TString2TString = rffi.llexternal(
"cppyy_TString2TString",
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -65,6 +65,7 @@
else: # only other use is sring
n = len(obj._string)
assert raw_string == rffi.cast(rffi.CCHARP, 0)
+ # XXX could use rffi.get_nonmovingbuffer_final_null()
raw_string = rffi.str2charp(obj._string)
data = rffi.cast(rffi.CCHARPP, data)
data[0] = raw_string
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -11,6 +11,9 @@
from rpython.rtyper.annlowlevel import llhelper
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
from rpython.rlib.objectmodel import dont_inline
+from rpython.rlib.rfile import (FILEP, c_fread, c_fclose, c_fwrite,
+ c_fdopen, c_fileno,
+ c_fopen)# for tests
from rpython.translator import cdir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.gensupp import NameManager
@@ -84,44 +87,32 @@
assert CONST_WSTRING == rffi.CWCHARP
# FILE* interface
-FILEP = rffi.COpaquePtr('FILE')
if sys.platform == 'win32':
dash = '_'
else:
dash = ''
-fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT)
-fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
-fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING],
- FILEP, save_err=rffi.RFFI_SAVE_ERRNO)
-_fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
def fclose(fp):
- if not is_valid_fd(fileno(fp)):
+ if not is_valid_fd(c_fileno(fp)):
return -1
- return _fclose(fp)
+ return c_fclose(fp)
-_fwrite = rffi.llexternal('fwrite',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fwrite(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fwrite(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fwrite(buf, sz, n, fp)
-_fread = rffi.llexternal('fread',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fread(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fread(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fread(buf, sz, n, fp)
_feof = rffi.llexternal('feof', [FILEP], rffi.INT)
def feof(fp):
- validate_fd(fileno(fp))
+ validate_fd(c_fileno(fp))
return _feof(fp)
def is_valid_fp(fp):
- return is_valid_fd(fileno(fp))
+ return is_valid_fd(c_fileno(fp))
pypy_decl = 'pypy_decl.h'
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -96,7 +96,8 @@
raise oefmt(space.w_ValueError,
"bytes_attach called on object with ob_size %d but trying to store %d",
py_str.c_ob_size, len(s))
- rffi.c_memcpy(py_str.c_ob_sval, rffi.str2charp(s), len(s))
+ with rffi.scoped_nonmovingbuffer(s) as s_ptr:
+ rffi.c_memcpy(py_str.c_ob_sval, s_ptr, len(s))
py_str.c_ob_sval[len(s)] = '\0'
py_str.c_ob_shash = space.hash_w(w_obj)
py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
diff --git a/pypy/module/cpyext/c-api.txt b/pypy/module/cpyext/c-api.txt
deleted file mode 100644
--- a/pypy/module/cpyext/c-api.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-Reference Count
-===============
-
-XXX
-
-Borrowed References
-===================
-
-XXX
-
-PyStringObject support
-======================
-
-The problem
------------
-
-PyString_AsString() returns a (non-movable) pointer to the underlying
-buffer, whereas pypy strings are movable. C code may temporarily
-store this address and use it, as long as it owns a reference to the
-PyObject. There is no "release" function to specify that the pointer
-is not needed any more.
-
-Note that the pointer may be used to fill the initial value of
-string. This is valid only when the string was just allocated, and is
-not used elsewhere.
-
-Proposed solution
------------------
-
-Our emulation of the PyStringObject contains an additional member: a
-pointer to a char buffer; it may be NULL.
-
-- A string allocated by pypy will be converted into a PyStringObject
- with a NULL buffer. When PyString_AsString() is called, memory is
- allocated (with flavor='raw') and content is copied.
-
-- A string allocated with PyString_FromStringAndSize(NULL, size) will
- allocate a buffer with the specified size, but the reference won't
- be stored in the global map py_objects_r2w; there won't be a
- corresponding object in pypy. When from_ref() or Py_INCREF() is
- called, the pypy string is created, and added in py_objects_r2w.
- The buffer is then supposed to be immutable.
-
-- _PyString_Resize works only on not-yet-pypy'd strings, and returns a
- similar object.
-
-- PyString_Size don't need to force the object. (in this case, another
- "size" member is needed)
-
-- There could be an (expensive!) check in from_ref() that the buffer
- still corresponds to the pypy gc-managed string.
-
-PySequence_Fast support
-======================
-There are five functions for fast sequence access offered by the CPython API:
-
-PyObject* PySequence_Fast(PyObject *o, const char *m)
-
-PyObject* PySequence_Fast_GET_ITEM( PyObject *o, int i)
-
-PyObject** PySequence_Fast_ITEMS( PyObject *o)
-
-PyObject* PySequence_ITEM( PyObject *o, int i)
-
-int PySequence_Fast_GET_SIZE( PyObject *o)
-
-PyPy supports four of these, but does not support PySequence_Fast_ITEMS.
-(Various ways to support PySequence_Fast_ITEMS were considered. They all had
-two things in common: they would have taken a lot of work, and they would have
-resulted in incomplete semantics or in poor performance. We decided that a slow
-implementation of PySequence_Fast_ITEMS was not very useful.)
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -25,6 +25,8 @@
flavor='raw',
add_memory_pressure=True)
+realloc = rffi.llexternal('realloc', [rffi.VOIDP, rffi.SIZE_T], rffi.VOIDP)
+
@cpython_api([rffi.VOIDP, size_t], rffi.VOIDP)
def PyObject_Realloc(space, ptr, size):
if not lltype.cast_ptr_to_int(ptr):
@@ -32,7 +34,7 @@
flavor='raw',
add_memory_pressure=True)
# XXX FIXME
- return lltype.nullptr(rffi.VOIDP.TO)
+ return realloc(ptr, size)
@cpython_api([rffi.VOIDP], lltype.Void)
def PyObject_Free(space, ptr):
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -10,7 +10,7 @@
from pypy.objspace.std import tupleobject
from pypy.module.cpyext.tupleobject import PyTuple_Check, PyTuple_SetItem
-from pypy.module.cpyext.object import Py_IncRef, Py_DecRef
+from pypy.module.cpyext.pyobject import decref
from pypy.module.cpyext.dictobject import PyDict_Check
@@ -252,7 +252,7 @@
def setitem(self, w_list, index, w_obj):
storage = self.unerase(w_list.lstorage)
index = self._check_index(index, storage._length)
- Py_DecRef(w_list.space, storage._elems[index])
+ decref(w_list.space, storage._elems[index])
storage._elems[index] = make_ref(w_list.space, w_obj)
def length(self, w_list):
@@ -264,9 +264,8 @@
return storage._elems
def getslice(self, w_list, start, stop, step, length):
- #storage = self.unerase(w_list.lstorage)
- raise oefmt(w_list.space.w_NotImplementedError,
- "settting a slice of a PySequence_Fast is not supported")
+ w_list.switch_to_object_strategy()
+ return w_list.strategy.getslice(w_list, start, stop, step, length)
def getitems(self, w_list):
# called when switching list strategy, so convert storage
@@ -389,5 +388,5 @@
def __del__(self):
for i in range(self._length):
- Py_DecRef(self.space, self._elems[i])
+ decref(self.space, self._elems[i])
lltype.free(self._elems, flavor='raw')
diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py
--- a/pypy/module/cpyext/test/test_eval.py
+++ b/pypy/module/cpyext/test/test_eval.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.eval import (
Py_single_input, Py_file_input, Py_eval_input, PyCompilerFlags)
-from pypy.module.cpyext.api import fopen, fclose, fileno, Py_ssize_tP
+from pypy.module.cpyext.api import c_fopen, c_fclose, c_fileno, Py_ssize_tP
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.astcompiler import consts
from rpython.tool.udir import udir
@@ -130,19 +130,19 @@
def test_run_file(self, space, api):
filepath = udir / "cpyext_test_runfile.py"
filepath.write("raise ZeroDivisionError")
- fp = fopen(str(filepath), "rb")
+ fp = c_fopen(str(filepath), "rb")
filename = rffi.str2charp(str(filepath))
w_globals = w_locals = space.newdict()
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_ZeroDivisionError
api.PyErr_Clear()
# try again, but with a closed file
- fp = fopen(str(filepath), "rb")
- os.close(fileno(fp))
+ fp = c_fopen(str(filepath), "rb")
+ os.close(c_fileno(fp))
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_IOError
api.PyErr_Clear()
diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py
--- a/pypy/module/cpyext/test/test_object.py
+++ b/pypy/module/cpyext/test/test_object.py
@@ -212,8 +212,9 @@
assert type(x) is float
assert x == -12.34
- @pytest.mark.skipif(True, reason='realloc not fully implemented')
def test_object_realloc(self):
+ if not self.runappdirect:
+ skip('no untranslated support for realloc')
module = self.import_extension('foo', [
("realloctest", "METH_NOARGS",
"""
@@ -221,12 +222,11 @@
char *copy, *orig = PyObject_MALLOC(12);
memcpy(orig, "hello world", 12);
copy = PyObject_REALLOC(orig, 15);
+ /* realloc() takes care of freeing orig, if changed */
if (copy == NULL)
Py_RETURN_NONE;
ret = PyBytes_FromStringAndSize(copy, 12);
- if (copy != orig)
- PyObject_Free(copy);
- PyObject_Free(orig);
+ PyObject_Free(copy);
return ret;
""")])
x = module.realloctest()
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -78,6 +78,17 @@
assert api.PySequence_SetSlice(w_t, 1, 1, space.wrap((3,))) == 0
assert space.eq_w(w_t, space.wrap([1, 3, 5]))
+ def test_get_slice_fast(self, space, api):
+ w_t = space.wrap([1, 2, 3, 4, 5])
+ api.PySequence_Fast(w_t, "foo") # converts
+ assert space.unwrap(api.PySequence_GetSlice(w_t, 2, 4)) == [3, 4]
+ assert space.unwrap(api.PySequence_GetSlice(w_t, 1, -1)) == [2, 3, 4]
+
+ assert api.PySequence_DelSlice(w_t, 1, 4) == 0
+ assert space.eq_w(w_t, space.wrap([1, 5]))
+ assert api.PySequence_SetSlice(w_t, 1, 1, space.wrap((3,))) == 0
+ assert space.eq_w(w_t, space.wrap([1, 3, 5]))
+
def test_iter(self, space, api):
w_t = space.wrap((1, 2))
w_iter = api.PySeqIter_New(w_t)
@@ -226,18 +237,33 @@
assert space.int_w(space.len(w_l)) == 10
-class XAppTestSequenceObject(AppTestCpythonExtensionBase):
- def test_sequenceobject(self):
+class AppTestSequenceObject(AppTestCpythonExtensionBase):
+ def test_fast(self):
module = self.import_extension('foo', [
("test_fast_sequence", "METH_VARARGS",
"""
- PyObject * o = PyTuple_GetItem(args, 0);
+ int size, i;
+ PyTypeObject * common_type;
+ PyObject *foo, **objects;
+ PyObject * seq = PyTuple_GetItem(args, 0);
/* XXX assert it is a tuple */
- PyObject *foo = PySequence_Fast(o, "some string");
- PyObject ** res = PySequence_Fast_ITEMS(foo);
- /* XXX do some kind of test on res */
- /* XXX now what? who manages res's refcount? */
+ if (seq == NULL)
+ Py_RETURN_NONE;
+ foo = PySequence_Fast(seq, "some string");
+ objects = PySequence_Fast_ITEMS(foo);
+ size = PySequence_Fast_GET_SIZE(seq);
+ common_type = size > 0 ? Py_TYPE(objects[0]) : NULL;
+ for (i = 1; i < size; ++i) {
+ if (Py_TYPE(objects[i]) != common_type) {
+ common_type = NULL;
+ break;
+ }
+ }
+ Py_DECREF(foo);
+ Py_DECREF(common_type);
return PyBool_FromLong(1);
""")])
- assert module.test_fast_sequence([1, 2, 3, 4])
+ s = [1, 2, 3, 4]
+ assert module.test_fast_sequence(s[0:-1])
+ assert module.test_fast_sequence(s[::-1])
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -47,6 +47,7 @@
jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE)
def get_location(next_instr, is_being_profiled, bytecode):
from pypy.tool.stdlib_opcode import opcode_method_names
+ from rpython.tool.error import offset2lineno
bcindex = ord(bytecode.co_code[next_instr])
opname = ""
if 0 <= bcindex < len(opcode_method_names):
@@ -54,7 +55,8 @@
name = bytecode.co_name
if not name:
name = ""
- return (bytecode.co_filename, bytecode.co_firstlineno,
+ line = offset2lineno(bytecode, intmask(next_instr))
+ return (bytecode.co_filename, line,
name, intmask(next_instr), opname)
def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode):
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -19,6 +19,7 @@
self.defaultencoding = "utf-8"
self.filesystemencoding = None
self.debug = True
+ self.track_resources = False
self.dlopenflags = rdynload._dlopen_default_mode()
interpleveldefs = {
@@ -48,6 +49,8 @@
'_current_frames' : 'currentframes._current_frames',
'setrecursionlimit' : 'vm.setrecursionlimit',
'getrecursionlimit' : 'vm.getrecursionlimit',
+ 'pypy_set_track_resources' : 'vm.set_track_resources',
+ 'pypy_get_track_resources' : 'vm.get_track_resources',
'setcheckinterval' : 'vm.setcheckinterval',
'getcheckinterval' : 'vm.getcheckinterval',
'exc_info' : 'vm.exc_info',
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -61,6 +61,13 @@
"""
return space.wrap(space.sys.recursionlimit)
+ at unwrap_spec(flag=bool)
+def set_track_resources(space, flag):
+ space.sys.track_resources = flag
+
+def get_track_resources(space):
+ return space.wrap(space.sys.track_resources)
+
@unwrap_spec(interval=int)
def setcheckinterval(space, interval):
"""Tell the Python interpreter to check for asynchronous events every
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
@@ -130,7 +130,7 @@
cls.module = str(udir.join('testownlib.dll'))
else:
subprocess.check_call(
- 'gcc testownlib.c -shared -fPIC -o testownlib.so',
+ 'cc testownlib.c -shared -fPIC -o testownlib.so',
cwd=str(udir), shell=True)
cls.module = str(udir.join('testownlib.so'))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -852,9 +852,12 @@
assert str(e2.value) == "foo0() takes no arguments (2 given)"
assert str(e3.value) == "foo1() takes exactly one argument (0 given)"
assert str(e4.value) == "foo1() takes exactly one argument (2 given)"
- assert str(e5.value) == "foo2() takes exactly 2 arguments (0 given)"
- assert str(e6.value) == "foo2() takes exactly 2 arguments (1 given)"
- assert str(e7.value) == "foo2() takes exactly 2 arguments (3 given)"
+ assert str(e5.value) in ["foo2 expected 2 arguments, got 0",
+ "foo2() takes exactly 2 arguments (0 given)"]
+ assert str(e6.value) in ["foo2 expected 2 arguments, got 1",
+ "foo2() takes exactly 2 arguments (1 given)"]
+ assert str(e7.value) in ["foo2 expected 2 arguments, got 3",
+ "foo2() takes exactly 2 arguments (3 given)"]
def test_address_of_function():
ffi = FFI()
@@ -1916,3 +1919,47 @@
ffi.cdef("bool f(void);")
lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
assert lib.f() == 1
+
+def test_bool_in_cpp_2():
+ ffi = FFI()
+ ffi.cdef('int add(int a, int b);')
+ lib = verify(ffi, "test_bool_bug_cpp", '''
+ typedef bool _Bool; /* there is a Windows header with this line */
+ int add(int a, int b)
+ {
+ return a + b;
+ }''', source_extension='.cpp')
+ c = lib.add(2, 3)
+ assert c == 5
+
+def test_struct_field_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[2]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+
+def test_function_arg_opaque():
+ py.test.skip("can currently declare a function with an opaque struct "
+ "as argument, but AFAICT it's impossible to call it later")
+
+def test_function_returns_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a foo(int);")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_function_returns_opaque", "?")
+ assert str(e.value) == ("function foo: 'struct a' is used as result type,"
+ " but is opaque")
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
@@ -133,6 +133,12 @@
# You cannot assing character format codes as restype any longer
raises(TypeError, setattr, f, "restype", "i")
+ def test_unicode_function_name(self):
+ f = dll[u'_testfunc_i_bhilfd']
+ f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
+ f.restype = c_int
+ result = f(1, 2, 3, 4, 5.0, 6.0)
+ assert result == 21
def test_truncate_python_longs(self):
f = dll._testfunc_i_bhilfd
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -434,4 +434,5 @@
FakeObjSpace.sys.filesystemencoding = 'foobar'
FakeObjSpace.sys.defaultencoding = 'ascii'
FakeObjSpace.sys.dlopenflags = 123
+FakeObjSpace.sys.track_resources = False
FakeObjSpace.builtin = FakeModule()
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -930,6 +930,7 @@
abstractinst.p_recursive_isinstance_type_w(space, w_inst, w_obj))
def type_get_dict(space, w_cls):
+ w_cls = _check(space, w_cls)
from pypy.objspace.std.dictproxyobject import W_DictProxyObject
w_dict = w_cls.getdict(space)
if w_dict is None:
@@ -1287,8 +1288,8 @@
cycle.append(candidate)
cycle.reverse()
names = [cls.getname(space) for cls in cycle]
- raise OperationError(space.w_TypeError, space.wrap(
- u"cycle among base classes: " + u' < '.join(names)))
+ raise oefmt(space.w_TypeError,
+ "cycle among base classes: %s", ' < '.join(names))
class TypeCache(SpaceCache):
diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py
--- a/pypy/tool/pytest/objspace.py
+++ b/pypy/tool/pytest/objspace.py
@@ -143,3 +143,5 @@
def is_w(self, obj1, obj2):
return obj1 is obj2
+ def setitem(self, obj, key, value):
+ obj[key] = value
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-# hypothesis is used for test generation on untranslated jit tests
+# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py
--- a/rpython/annotator/binaryop.py
+++ b/rpython/annotator/binaryop.py
@@ -401,6 +401,9 @@
class __extend__(pairtype(SomeString, SomeTuple),
pairtype(SomeUnicodeString, SomeTuple)):
def mod((s_string, s_tuple)):
+ if not s_string.is_constant():
+ raise AnnotatorError("string formatting requires a constant "
+ "string/unicode on the left of '%'")
is_string = isinstance(s_string, SomeString)
is_unicode = isinstance(s_string, SomeUnicodeString)
assert is_string or is_unicode
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4623,6 +4623,14 @@
a = self.RPythonAnnotator()
a.build_types(main, [int])
+ def test_string_mod_nonconstant(self):
+ def f(x):
+ return x % 5
+ a = self.RPythonAnnotator()
+ e = py.test.raises(AnnotatorError, a.build_types, f, [str])
+ assert ('string formatting requires a constant string/unicode'
+ in str(e.value))
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
--- a/rpython/jit/backend/arm/opassembler.py
+++ b/rpython/jit/backend/arm/opassembler.py
@@ -883,6 +883,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self._gen_address(resloc, baseloc, ofsloc, scale, ofs_items)
diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py
--- a/rpython/jit/backend/llsupport/descr.py
+++ b/rpython/jit/backend/llsupport/descr.py
@@ -280,7 +280,7 @@
concrete_type = '\x00'
def __init__(self, basesize, itemsize, lendescr, flag, is_pure=False, concrete_type='\x00'):
- self.basesize = basesize
+ self.basesize = basesize # this includes +1 for STR
self.itemsize = itemsize
self.lendescr = lendescr # or None, if no length
self.flag = flag
@@ -676,7 +676,7 @@
def unpack_arraydescr(arraydescr):
assert isinstance(arraydescr, ArrayDescr)
- ofs = arraydescr.basesize
+ ofs = arraydescr.basesize # this includes +1 for STR
size = arraydescr.itemsize
sign = arraydescr.is_item_signed()
return size, ofs, sign
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -293,6 +293,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1),
itemsize, itemsize, basesize, NOT_SIGNED)
elif opnum == rop.UNICODEGETITEM:
@@ -304,6 +305,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2),
itemsize, itemsize, basesize)
elif opnum == rop.UNICODESETITEM:
diff --git a/rpython/jit/backend/llsupport/symbolic.py b/rpython/jit/backend/llsupport/symbolic.py
--- a/rpython/jit/backend/llsupport/symbolic.py
+++ b/rpython/jit/backend/llsupport/symbolic.py
@@ -29,7 +29,7 @@
def get_array_token(T, translate_support_code):
# T can be an array or a var-sized structure
if translate_support_code:
- basesize = llmemory.sizeof(T, 0)
+ basesize = llmemory.sizeof(T, 0) # this includes +1 for STR
if isinstance(T, lltype.Struct):
SUBARRAY = getattr(T, T._arrayfld)
itemsize = llmemory.sizeof(SUBARRAY.OF)
@@ -57,6 +57,7 @@
assert carray.length.size == WORD
ofs_length = before_array_part + carray.length.offset
basesize = before_array_part + carray.items.offset
+ basesize += T._hints.get('extra_item_after_alloc', 0) # +1 for STR
carrayitem = ll2ctypes.get_ctypes_type(T.OF)
itemsize = ctypes.sizeof(carrayitem)
return basesize, itemsize, ofs_length
diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py
--- a/rpython/jit/backend/llsupport/test/test_descr.py
+++ b/rpython/jit/backend/llsupport/test/test_descr.py
@@ -435,8 +435,10 @@
def test_bytearray_descr():
c0 = GcCache(False)
descr = get_array_descr(c0, rstr.STR) # for bytearray
+ # note that we get a basesize that has 1 extra byte for the final null char
+ # (only for STR)
assert descr.flag == FLAG_UNSIGNED
- assert descr.basesize == struct.calcsize("PP") # hash, length
+ assert descr.basesize == struct.calcsize("PP") + 1 # hash, length, extra
assert descr.lendescr.offset == struct.calcsize("P") # hash
assert not descr.is_array_of_pointers()
diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py
--- a/rpython/jit/backend/llsupport/test/test_rewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_rewrite.py
@@ -647,6 +647,9 @@
""")
def test_rewrite_assembler_newstr_newunicode(self):
+ # note: strdescr.basesize already contains the extra final character,
+ # so that's why newstr(14) is rounded up to 'basesize+15' and not
+ # 'basesize+16'.
self.check_rewrite("""
[i2]
p0 = newstr(14)
@@ -657,12 +660,12 @@
""", """
[i2]
p0 = call_malloc_nursery( \
- %(strdescr.basesize + 16 * strdescr.itemsize + \
+ %(strdescr.basesize + 15 * strdescr.itemsize + \
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s)
gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s)
gc_store(p0, 0, 0, %(strhashdescr.field_size)s)
- p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
+ p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 15 * strdescr.itemsize)d)
gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s)
gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s)
gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s)
@@ -1240,14 +1243,14 @@
# 'i3 = gc_load_i(p0,i5,%(unicodedescr.itemsize)d)'],
[True, (4,), 'i3 = strgetitem(p0,i1)' '->'
'i3 = gc_load_indexed_i(p0,i1,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
#[False, (4,), 'i3 = strgetitem(p0,i1)' '->'
- # 'i5 = int_add(i1, %(strdescr.basesize)d);'
+ # 'i5 = int_add(i1, %(strdescr.basesize-1)d);'
# 'i3 = gc_load_i(p0,i5,1)'],
## setitem str/unicode
[True, (4,), 'i3 = strsetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
[True, (2,4), 'i3 = unicodesetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,'
'%(unicodedescr.itemsize)d,'
diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py
--- a/rpython/jit/backend/llsupport/test/ztranslation_test.py
+++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py
@@ -3,7 +3,7 @@
from rpython.rlib.jit import JitDriver, unroll_parameters, set_param
from rpython.rlib.jit import PARAMETERS, dont_look_inside
from rpython.rlib.jit import promote, _get_virtualizable_token
-from rpython.rlib import jit_hooks, rposix
+from rpython.rlib import jit_hooks, rposix, rgc
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField
from rpython.jit.backend.detect_cpu import getcpuclass
@@ -11,7 +11,7 @@
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.config.config import ConfigError
from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rlib.rjitlog import rjitlog as jl
@@ -29,6 +29,7 @@
# - floats neg and abs
# - cast_int_to_float
# - llexternal with macro=True
+ # - extra place for the zero after STR instances
class BasicFrame(object):
_virtualizable_ = ['i']
@@ -56,7 +57,7 @@
return ("/home.py",0,0)
jitdriver = JitDriver(greens = [],
- reds = ['total', 'frame', 'j'],
+ reds = ['total', 'frame', 'prev_s', 'j'],
virtualizables = ['frame'],
get_location = get_location)
def f(i, j):
@@ -68,9 +69,12 @@
total = 0
frame = Frame(i)
j = float(j)
+ prev_s = rstr.mallocstr(16)
while frame.i > 3:
- jitdriver.can_enter_jit(frame=frame, total=total, j=j)
- jitdriver.jit_merge_point(frame=frame, total=total, j=j)
+ jitdriver.can_enter_jit(frame=frame, total=total, j=j,
+ prev_s=prev_s)
+ jitdriver.jit_merge_point(frame=frame, total=total, j=j,
+ prev_s=prev_s)
_get_virtualizable_token(frame)
total += frame.i
if frame.i >= 20:
@@ -82,6 +86,11 @@
k = myabs1(myabs2(j))
if k - abs(j): raise ValueError
if k - abs(-j): raise ValueError
+ s = rstr.mallocstr(16)
+ rgc.ll_write_final_null_char(s)
+ rgc.ll_write_final_null_char(prev_s)
+ if (frame.i & 3) == 0:
+ prev_s = s
return chr(total % 253)
#
class Virt2(object):
diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py
--- a/rpython/jit/backend/ppc/opassembler.py
+++ b/rpython/jit/backend/ppc/opassembler.py
@@ -994,6 +994,7 @@
basesize, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
scale = 0
self._emit_load_for_copycontent(r.r0, src_ptr_loc, src_ofs_loc, scale)
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -2,48 +2,157 @@
from rpython.rlib import jit
from rpython.rtyper.annlowlevel import llhelper
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib.rvmprof import cintf
+from rpython.rlib.rvmprof import cintf, vmprof_execute_code, register_code,\
+ register_code_object_class, _get_vmprof
from rpython.jit.backend.x86.arch import WORD
from rpython.jit.codewriter.policy import JitPolicy
+
class BaseRVMProfTest(object):
- def test_one(self):
- py.test.skip("needs thread-locals in the JIT, which is only available "
- "after translation")
+
+ def setup_method(self, meth):
visited = []
def helper():
+ trace = []
stack = cintf.vmprof_tl_stack.getraw()
- if stack:
- # not during tracing
- visited.append(stack.c_value)
- else:
- visited.append(0)
+ while stack:
+ trace.append((stack.c_kind, stack.c_value))
+ stack = stack.c_next
+ visited.append(trace)
llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
- driver = jit.JitDriver(greens=[], reds='auto')
+ class CodeObj(object):
+ def __init__(self, name):
+ self.name = name
- def f(n):
+ def get_code_fn(codes, code, arg, c):
+ return code
+
+ def get_name(code):
+ return "foo"
+
+ _get_vmprof().use_weaklist = False
+ register_code_object_class(CodeObj, get_name)
+
+ self.misc = visited, llfn, CodeObj, get_code_fn, get_name
+
+
+ def teardown_method(self, meth):
+ del _get_vmprof().use_weaklist
+
+
+ def test_simple(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
i = 0
while i < n:
- driver.jit_merge_point()
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ c = f(codes, codes[1], 1, c)
+ else:
+ llfn()
+ c -= 1
i += 1
- llfn()
+ return c
- class Hooks(jit.JitHookInterface):
- def after_compile(self, debug_info):
- self.raw_start = debug_info.asminfo.rawstart
-
- hooks = Hooks()
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ return f(codes, codes[0], n, 8)
null = lltype.nullptr(cintf.VMPROFSTACK)
- cintf.vmprof_tl_stack.setraw(null) # make it empty
- self.meta_interp(f, [10], policy=JitPolicy(hooks))
- v = set(visited)
- assert 0 in v
- v.remove(0)
- assert len(v) == 1
- assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
- assert cintf.vmprof_tl_stack.getraw() == null
- # ^^^ make sure we didn't leave anything dangling
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
+ def test_leaving_with_exception(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ class MyExc(Exception):
+ def __init__(self, c):
+ self.c = c
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while i < n:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ try:
+ f(codes, codes[1], 1, c)
+ except MyExc as e:
+ c = e.c
+ else:
+ llfn()
+ c -= 1
+ i += 1
+ raise MyExc(c)
+
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ try:
+ f(codes, codes[0], n, 8)
+ except MyExc as e:
+ return e.c
+
+ null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
+ def test_leaving_with_exception_in_blackhole(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ class MyExc(Exception):
+ def __init__(self, c):
+ self.c = c
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while True:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if i >= n:
+ break
+ i += 1
+ if code.name == "main":
+ try:
+ f(codes, codes[1], 1, c)
+ except MyExc as e:
+ c = e.c
+ driver.can_enter_jit(code=code, c=c, i=i, codes=codes, n=n)
+ else:
+ llfn()
+ c -= 1
+ if c & 1: # a failing guard
+ pass
+ raise MyExc(c)
+
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ try:
+ f(codes, codes[0], n, 8)
+ except MyExc as e:
+ return e.c
+
+ null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1673,25 +1673,6 @@
dest_addr = AddressLoc(base_loc, ofs_loc, scale, offset_loc.value)
self.save_into_mem(dest_addr, value_loc, size_loc)
- def genop_discard_strsetitem(self, op, arglocs):
- base_loc, ofs_loc, val_loc = arglocs
- basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR,
- self.cpu.translate_support_code)
- assert itemsize == 1
- dest_addr = AddressLoc(base_loc, ofs_loc, 0, basesize)
- self.mc.MOV8(dest_addr, val_loc.lowest8bits())
-
- def genop_discard_unicodesetitem(self, op, arglocs):
- base_loc, ofs_loc, val_loc = arglocs
- basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
- self.cpu.translate_support_code)
- if itemsize == 4:
- self.mc.MOV32(AddressLoc(base_loc, ofs_loc, 2, basesize), val_loc)
- elif itemsize == 2:
- self.mc.MOV16(AddressLoc(base_loc, ofs_loc, 1, basesize), val_loc)
- else:
- assert 0, itemsize
-
# genop_discard_setfield_raw = genop_discard_setfield_gc
def genop_math_read_timestamp(self, op, arglocs, resloc):
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -1219,6 +1219,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self.assembler.load_effective_addr(ofsloc, ofs_items, scale,
resloc, baseloc)
diff --git a/rpython/jit/backend/x86/test/test_rvmprof.py b/rpython/jit/backend/x86/test/test_rvmprof.py
--- a/rpython/jit/backend/x86/test/test_rvmprof.py
+++ b/rpython/jit/backend/x86/test/test_rvmprof.py
@@ -3,5 +3,5 @@
from rpython.jit.backend.test.test_rvmprof import BaseRVMProfTest
from rpython.jit.backend.x86.test.test_basic import Jit386Mixin
From pypy.commits at gmail.com Thu Aug 11 11:47:03 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Thu, 11 Aug 2016 08:47:03 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: scatter unroll_safe to more
dispatch function that have a loop inside
Message-ID: <57ac9df7.87941c0a.4ba94.25f0@mx.google.com>
Author: Richard Plangger
Branch: py3.5-async
Changeset: r86145:bdc365a69c47
Date: 2016-08-11 17:17 +0200
http://bitbucket.org/pypy/pypy/changeset/bdc365a69c47/
Log: scatter unroll_safe to more dispatch function that have a loop
inside
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1343,6 +1343,7 @@
raise BytecodeCorruption("unknown opcode, ofs=%d, code=%d, name=%s" %
(ofs, ord(c), name) )
+ @jit.unroll_safe
def BUILD_MAP(self, itemcount, next_instr):
w_dict = self.space.newdict()
for i in range(itemcount):
@@ -1359,6 +1360,7 @@
self.space.call_method(w_set, 'add', w_item)
self.pushvalue(w_set)
+ @jit.unroll_safe
def BUILD_SET_UNPACK(self, itemcount, next_instr):
space = self.space
w_sum = space.newset()
@@ -1398,6 +1400,7 @@
w_sum = self.list_unpack_helper(itemcount)
self.pushvalue(w_sum)
+ @jit.unroll_safe
def BUILD_MAP_UNPACK_WITH_CALL(self, itemcount, next_instr):
space = self.space
num_maps = itemcount & 0xff
@@ -1430,6 +1433,7 @@
num_maps -= 1
self.pushvalue(w_dict)
+ @jit.unroll_safe
def BUILD_MAP_UNPACK(self, itemcount, next_instr):
space = self.space
w_dict = space.newdict()
From pypy.commits at gmail.com Thu Aug 11 11:49:01 2016
From: pypy.commits at gmail.com (rlamy)
Date: Thu, 11 Aug 2016 08:49:01 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Merged in marky1991/pypy_new/py3k (pull
request #468)
Message-ID: <57ac9e6d.a719c20a.aba65.2c28@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r86152:e2f0f6eced42
Date: 2016-08-11 16:47 +0100
http://bitbucket.org/pypy/pypy/changeset/e2f0f6eced42/
Log: Merged in marky1991/pypy_new/py3k (pull request #468)
Py3k: Fix Translation for FreeBSD
diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py
--- a/pypy/module/_posixsubprocess/interp_subprocess.py
+++ b/pypy/module/_posixsubprocess/interp_subprocess.py
@@ -15,8 +15,9 @@
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
- includes=['unistd.h', 'sys/syscall.h'])
+ includes=['unistd.h', 'sys/syscall.h', 'sys/stat.h'])
HAVE_SYS_SYSCALL_H = platform.Has("syscall")
+ HAVE_SYS_STAT_H = platform.Has("stat")
HAVE_SETSID = platform.Has("setsid")
config = platform.configure(CConfig)
@@ -29,6 +30,8 @@
compile_extra = []
if config['HAVE_SYS_SYSCALL_H']:
compile_extra.append("-DHAVE_SYS_SYSCALL_H")
+if config['HAVE_SYS_STAT_H']:
+ compile_extra.append("-DHAVE_SYS_STAT_H")
if config['HAVE_SETSID']:
compile_extra.append("-DHAVE_SETSID")
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -159,7 +159,6 @@
libraries=rtime.libraries
)
CLOCKS_PER_SEC = platform.ConstantInteger("CLOCKS_PER_SEC")
- clock_t = platform.SimpleType("clock_t", rffi.ULONG)
has_gettimeofday = platform.Has('gettimeofday')
has_clock_gettime = platform.Has('clock_gettime')
CLOCK_PROF = platform.DefinedConstantInteger('CLOCK_PROF')
@@ -233,7 +232,6 @@
HAS_CLOCK_MONOTONIC = cConfig.CLOCK_MONOTONIC is not None
HAS_MONOTONIC = (_WIN or _MACOSX or
(HAS_CLOCK_GETTIME and (HAS_CLOCK_HIGHRES or HAS_CLOCK_MONOTONIC)))
-clock_t = cConfig.clock_t
tm = cConfig.tm
glob_buf = lltype.malloc(tm, flavor='raw', zero=True, immortal=True)
@@ -1030,7 +1028,10 @@
with lltype.scoped_alloc(rposix.TMS) as tms:
ret = rposix.c_times(tms)
if rffi.cast(lltype.Signed, ret) != -1:
- cpu_time = float(tms.c_tms_utime + tms.c_tms_stime)
+ cpu_time = float(rffi.cast(lltype.Signed,
+ tms.c_tms_utime) +
+ rffi.cast(lltype.Signed,
+ tms.c_tms_stime))
if w_info is not None:
_setinfo(space, w_info, "times()",
1.0 / rposix.CLOCK_TICKS_PER_SECOND,
@@ -1038,7 +1039,7 @@
return space.wrap(cpu_time / rposix.CLOCK_TICKS_PER_SECOND)
return clock(space)
-_clock = external('clock', [], clock_t)
+_clock = external('clock', [], rposix.CLOCK_T)
def clock(space, w_info=None):
"""clock() -> floating point number
@@ -1052,7 +1053,7 @@
pass
value = _clock()
# Is this casting correct?
- if value == rffi.cast(clock_t, -1):
+ if intmask(value) == intmask(rffi.cast(rposix.CLOCK_T, -1)):
raise oefmt(space.w_RuntimeError,
"the processor time used is not available or its value"
"cannot be represented")
From pypy.commits at gmail.com Thu Aug 11 11:49:21 2016
From: pypy.commits at gmail.com (marky1991)
Date: Thu, 11 Aug 2016 08:49:21 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Working on fixing freebsd.
Message-ID: <57ac9e81.43681c0a.ff3f.2446@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r86146:a7a048ffb511
Date: 2016-08-02 10:54 -0400
http://bitbucket.org/pypy/pypy/changeset/a7a048ffb511/
Log: Working on fixing freebsd.
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -159,7 +159,6 @@
libraries=rtime.libraries
)
CLOCKS_PER_SEC = platform.ConstantInteger("CLOCKS_PER_SEC")
- clock_t = platform.SimpleType("clock_t", rffi.ULONG)
has_gettimeofday = platform.Has('gettimeofday')
has_clock_gettime = platform.Has('clock_gettime')
CLOCK_PROF = platform.DefinedConstantInteger('CLOCK_PROF')
@@ -233,7 +232,6 @@
HAS_CLOCK_MONOTONIC = cConfig.CLOCK_MONOTONIC is not None
HAS_MONOTONIC = (_WIN or _MACOSX or
(HAS_CLOCK_GETTIME and (HAS_CLOCK_HIGHRES or HAS_CLOCK_MONOTONIC)))
-clock_t = cConfig.clock_t
tm = cConfig.tm
glob_buf = lltype.malloc(tm, flavor='raw', zero=True, immortal=True)
@@ -1032,7 +1030,7 @@
return space.wrap(cpu_time / rposix.CLOCK_TICKS_PER_SECOND)
return clock(space)
-_clock = external('clock', [], clock_t)
+_clock = external('clock', [], rposix.CLOCK_T)
def clock(space, w_info=None):
"""clock() -> floating point number
@@ -1046,7 +1044,7 @@
pass
value = _clock()
# Is this casting correct?
- if value == rffi.cast(clock_t, -1):
+ if value == rffi.cast(rposix.CLOCK_T, -1):
raise oefmt(space.w_RuntimeError,
"the processor time used is not available or its value"
"cannot be represented")
From pypy.commits at gmail.com Thu Aug 11 11:49:26 2016
From: pypy.commits at gmail.com (marky1991)
Date: Thu, 11 Aug 2016 08:49:26 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Mostly fix translation on freebsd. A
failure still happens late in translation, but rtyping works at least.
Message-ID: <57ac9e86.d4e01c0a.58539.4911@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r86148:2d01427fae77
Date: 2016-08-02 18:57 +0000
http://bitbucket.org/pypy/pypy/changeset/2d01427fae77/
Log: Mostly fix translation on freebsd. A failure still happens late in
translation, but rtyping works at least.
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -1022,7 +1022,10 @@
with lltype.scoped_alloc(rposix.TMS) as tms:
ret = rposix.c_times(tms)
if rffi.cast(lltype.Signed, ret) != -1:
- cpu_time = float(tms.c_tms_utime + tms.c_tms_stime)
+ cpu_time = float(rffi.cast(lltype.Signed,
+ tms.c_tms_utime) +
+ rffi.cast(lltype.Signed,
+ tms.c_tms_stime))
if w_info is not None:
_setinfo(space, w_info, "times()",
1.0 / rposix.CLOCK_TICKS_PER_SECOND,
@@ -1044,7 +1047,7 @@
pass
value = _clock()
# Is this casting correct?
- if value == rffi.cast(rposix.CLOCK_T, -1):
+ if intmask(value) == intmask(rffi.cast(rposix.CLOCK_T, -1)):
raise oefmt(space.w_RuntimeError,
"the processor time used is not available or its value"
"cannot be represented")
From pypy.commits at gmail.com Thu Aug 11 11:49:24 2016
From: pypy.commits at gmail.com (marky1991)
Date: Thu, 11 Aug 2016 08:49:24 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Merging.
Message-ID: <57ac9e84.c310c20a.6b7b5.240f@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r86147:a5d44d8c7857
Date: 2016-08-02 10:54 -0400
http://bitbucket.org/pypy/pypy/changeset/a5d44d8c7857/
Log: Merging.
diff too long, truncating to 2000 out of 4974 lines
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -452,7 +452,7 @@
RegrTest('test_userstring.py', core=True),
RegrTest('test_uu.py'),
RegrTest('test_uuid.py'),
- RegrTest('test_venv.py'),
+ RegrTest('test_venv.py', usemodules="struct"),
RegrTest('test_wait3.py', usemodules="thread"),
RegrTest('test_wait4.py', usemodules="thread"),
RegrTest('test_warnings.py', core=True),
diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py
--- a/pypy/objspace/std/test/test_obj.py
+++ b/pypy/objspace/std/test/test_obj.py
@@ -211,11 +211,10 @@
def test_identity_vs_id_primitives(self):
import sys
- l = range(-10, 10, 2)
+ l = list(range(-10, 10, 2))
for i in [0, 1, 3]:
l.append(float(i))
l.append(i + 0.1)
- l.append(long(i))
l.append(i + sys.maxsize)
l.append(i - sys.maxsize)
l.append(i + 1j)
diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py
--- a/pypy/objspace/std/test/test_unicodeobject.py
+++ b/pypy/objspace/std/test/test_unicodeobject.py
@@ -976,3 +976,12 @@
raises(TypeError, "u''.encode(None)")
raises(TypeError, "str(b'', encoding=None)")
raises(TypeError, 'u"".encode("utf-8", None)')
+
+ def test_casefold(self):
+ assert 'hello'.casefold() == 'hello'
+ assert 'hELlo'.casefold() == 'hello'
+ assert 'ß'.casefold() == 'ss'
+ assert 'fi'.casefold() == 'fi'
+ assert '\u03a3'.casefold() == '\u03c3'
+ assert 'A\u0345\u03a3'.casefold() == 'a\u03b9\u03c3'
+ assert '\u00b5'.casefold() == '\u03bc'
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
--- a/pypy/objspace/std/unicodeobject.py
+++ b/pypy/objspace/std/unicodeobject.py
@@ -414,6 +414,19 @@
def _join_check_item(self, space, w_obj):
return not space.isinstance_w(w_obj, space.w_unicode)
+ def descr_casefold(self, space):
+ value = self._val(space)
+ builder = self._builder(len(value))
+ for c in value:
+ c_ord = ord(c)
+ folded = unicodedb.casefold_lookup(c_ord)
+ if folded is None:
+ builder.append(unichr(unicodedb.tolower(c_ord)))
+ else:
+ for r in folded:
+ builder.append(unichr(r))
+ return self._new(builder.build())
+
def descr_isdecimal(self, space):
return self._is_generic(space, '_isdecimal')
@@ -815,6 +828,12 @@
and there is at least one character in S, False otherwise.
"""
+ def casefold():
+ """S.casefold() -> str
+
+ Return a version of S suitable for caseless comparisons.
+ """
+
def isdecimal():
"""S.isdecimal() -> bool
@@ -1105,6 +1124,8 @@
capitalize = interp2app(W_UnicodeObject.descr_capitalize,
doc=UnicodeDocstrings.capitalize.__doc__),
+ casefold = interp2app(W_UnicodeObject.descr_casefold,
+ doc=UnicodeDocstrings.casefold.__doc__),
center = interp2app(W_UnicodeObject.descr_center,
doc=UnicodeDocstrings.center.__doc__),
count = interp2app(W_UnicodeObject.descr_count,
diff --git a/rpython/rlib/unicodedata/CaseFolding-3.2.0.txt b/rpython/rlib/unicodedata/CaseFolding-3.2.0.txt
new file mode 100644
--- /dev/null
+++ b/rpython/rlib/unicodedata/CaseFolding-3.2.0.txt
@@ -0,0 +1,912 @@
+# CaseFolding-3.2.0.txt
+# Date: 2002-03-22,20:54:33 GMT [MD]
+#
+# Case Folding Properties
+#
+# This file is a supplement to the UnicodeData file.
+# It provides a case folding mapping generated from the Unicode Character Database.
+# If all characters are mapped according to the full mapping below, then
+# case differences (according to UnicodeData.txt and SpecialCasing.txt)
+# are eliminated.
+#
+# The data supports both implementations that require simple case foldings
+# (where string lengths don't change), and implementations that allow full case folding
+# (where string lengths may grow). Note that where they can be supported, the
+# full case foldings are superior: for example, they allow "MASSE" and "Maße" to match.
+#
+# NOTE: case folding does not preserve normalization formats!
+#
+# For information on case folding, see
+# UTR #21 Case Mappings, at http://www.unicode.org/unicode/reports/tr21/
+#
+# ================================================================================
+# Format
+# ================================================================================
+# The entries in this file are in the following machine-readable format:
+#
+# <code>; <status>; <mapping>; # <name>
+#
+# The status field is:
+# C: common case folding, common mappings shared by both simple and full mappings.
+# F: full case folding, mappings that cause strings to grow in length. Multiple characters are separated by spaces.
+# S: simple case folding, mappings to single characters where different from F.
+# T: special case for uppercase I and dotted uppercase I
+# - For non-Turkic languages, this mapping is normally not used.
+# - For Turkic languages (tr, az), this mapping can be used instead of the normal mapping for these characters.
+#
+# Usage:
+# A. To do a simple case folding, use the mappings with status C + S.
+# B. To do a full case folding, use the mappings with status C + F.
+#
+# The mappings with status T can be used or omitted depending on the desired case-folding
+# behavior. (The default option is to exclude them.)
+#
+# =================================================================
+
+0041; C; 0061; # LATIN CAPITAL LETTER A
+0042; C; 0062; # LATIN CAPITAL LETTER B
+0043; C; 0063; # LATIN CAPITAL LETTER C
+0044; C; 0064; # LATIN CAPITAL LETTER D
+0045; C; 0065; # LATIN CAPITAL LETTER E
+0046; C; 0066; # LATIN CAPITAL LETTER F
+0047; C; 0067; # LATIN CAPITAL LETTER G
+0048; C; 0068; # LATIN CAPITAL LETTER H
+0049; C; 0069; # LATIN CAPITAL LETTER I
+0049; T; 0131; # LATIN CAPITAL LETTER I
+004A; C; 006A; # LATIN CAPITAL LETTER J
+004B; C; 006B; # LATIN CAPITAL LETTER K
+004C; C; 006C; # LATIN CAPITAL LETTER L
+004D; C; 006D; # LATIN CAPITAL LETTER M
+004E; C; 006E; # LATIN CAPITAL LETTER N
+004F; C; 006F; # LATIN CAPITAL LETTER O
+0050; C; 0070; # LATIN CAPITAL LETTER P
+0051; C; 0071; # LATIN CAPITAL LETTER Q
+0052; C; 0072; # LATIN CAPITAL LETTER R
+0053; C; 0073; # LATIN CAPITAL LETTER S
+0054; C; 0074; # LATIN CAPITAL LETTER T
+0055; C; 0075; # LATIN CAPITAL LETTER U
+0056; C; 0076; # LATIN CAPITAL LETTER V
+0057; C; 0077; # LATIN CAPITAL LETTER W
+0058; C; 0078; # LATIN CAPITAL LETTER X
+0059; C; 0079; # LATIN CAPITAL LETTER Y
+005A; C; 007A; # LATIN CAPITAL LETTER Z
+00B5; C; 03BC; # MICRO SIGN
+00C0; C; 00E0; # LATIN CAPITAL LETTER A WITH GRAVE
+00C1; C; 00E1; # LATIN CAPITAL LETTER A WITH ACUTE
+00C2; C; 00E2; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+00C3; C; 00E3; # LATIN CAPITAL LETTER A WITH TILDE
+00C4; C; 00E4; # LATIN CAPITAL LETTER A WITH DIAERESIS
+00C5; C; 00E5; # LATIN CAPITAL LETTER A WITH RING ABOVE
+00C6; C; 00E6; # LATIN CAPITAL LETTER AE
+00C7; C; 00E7; # LATIN CAPITAL LETTER C WITH CEDILLA
+00C8; C; 00E8; # LATIN CAPITAL LETTER E WITH GRAVE
+00C9; C; 00E9; # LATIN CAPITAL LETTER E WITH ACUTE
+00CA; C; 00EA; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+00CB; C; 00EB; # LATIN CAPITAL LETTER E WITH DIAERESIS
+00CC; C; 00EC; # LATIN CAPITAL LETTER I WITH GRAVE
+00CD; C; 00ED; # LATIN CAPITAL LETTER I WITH ACUTE
+00CE; C; 00EE; # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+00CF; C; 00EF; # LATIN CAPITAL LETTER I WITH DIAERESIS
+00D0; C; 00F0; # LATIN CAPITAL LETTER ETH
+00D1; C; 00F1; # LATIN CAPITAL LETTER N WITH TILDE
+00D2; C; 00F2; # LATIN CAPITAL LETTER O WITH GRAVE
+00D3; C; 00F3; # LATIN CAPITAL LETTER O WITH ACUTE
+00D4; C; 00F4; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+00D5; C; 00F5; # LATIN CAPITAL LETTER O WITH TILDE
+00D6; C; 00F6; # LATIN CAPITAL LETTER O WITH DIAERESIS
+00D8; C; 00F8; # LATIN CAPITAL LETTER O WITH STROKE
+00D9; C; 00F9; # LATIN CAPITAL LETTER U WITH GRAVE
+00DA; C; 00FA; # LATIN CAPITAL LETTER U WITH ACUTE
+00DB; C; 00FB; # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+00DC; C; 00FC; # LATIN CAPITAL LETTER U WITH DIAERESIS
+00DD; C; 00FD; # LATIN CAPITAL LETTER Y WITH ACUTE
+00DE; C; 00FE; # LATIN CAPITAL LETTER THORN
+00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S
+0100; C; 0101; # LATIN CAPITAL LETTER A WITH MACRON
+0102; C; 0103; # LATIN CAPITAL LETTER A WITH BREVE
+0104; C; 0105; # LATIN CAPITAL LETTER A WITH OGONEK
+0106; C; 0107; # LATIN CAPITAL LETTER C WITH ACUTE
+0108; C; 0109; # LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+010A; C; 010B; # LATIN CAPITAL LETTER C WITH DOT ABOVE
+010C; C; 010D; # LATIN CAPITAL LETTER C WITH CARON
+010E; C; 010F; # LATIN CAPITAL LETTER D WITH CARON
+0110; C; 0111; # LATIN CAPITAL LETTER D WITH STROKE
+0112; C; 0113; # LATIN CAPITAL LETTER E WITH MACRON
+0114; C; 0115; # LATIN CAPITAL LETTER E WITH BREVE
+0116; C; 0117; # LATIN CAPITAL LETTER E WITH DOT ABOVE
+0118; C; 0119; # LATIN CAPITAL LETTER E WITH OGONEK
+011A; C; 011B; # LATIN CAPITAL LETTER E WITH CARON
+011C; C; 011D; # LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+011E; C; 011F; # LATIN CAPITAL LETTER G WITH BREVE
+0120; C; 0121; # LATIN CAPITAL LETTER G WITH DOT ABOVE
+0122; C; 0123; # LATIN CAPITAL LETTER G WITH CEDILLA
+0124; C; 0125; # LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+0126; C; 0127; # LATIN CAPITAL LETTER H WITH STROKE
+0128; C; 0129; # LATIN CAPITAL LETTER I WITH TILDE
+012A; C; 012B; # LATIN CAPITAL LETTER I WITH MACRON
+012C; C; 012D; # LATIN CAPITAL LETTER I WITH BREVE
+012E; C; 012F; # LATIN CAPITAL LETTER I WITH OGONEK
+0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+0130; T; 0069; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+0132; C; 0133; # LATIN CAPITAL LIGATURE IJ
+0134; C; 0135; # LATIN CAPITAL LETTER J WITH CIRCUMFLEX
+0136; C; 0137; # LATIN CAPITAL LETTER K WITH CEDILLA
+0139; C; 013A; # LATIN CAPITAL LETTER L WITH ACUTE
+013B; C; 013C; # LATIN CAPITAL LETTER L WITH CEDILLA
+013D; C; 013E; # LATIN CAPITAL LETTER L WITH CARON
+013F; C; 0140; # LATIN CAPITAL LETTER L WITH MIDDLE DOT
+0141; C; 0142; # LATIN CAPITAL LETTER L WITH STROKE
+0143; C; 0144; # LATIN CAPITAL LETTER N WITH ACUTE
+0145; C; 0146; # LATIN CAPITAL LETTER N WITH CEDILLA
+0147; C; 0148; # LATIN CAPITAL LETTER N WITH CARON
+0149; F; 02BC 006E; # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
+014A; C; 014B; # LATIN CAPITAL LETTER ENG
+014C; C; 014D; # LATIN CAPITAL LETTER O WITH MACRON
+014E; C; 014F; # LATIN CAPITAL LETTER O WITH BREVE
+0150; C; 0151; # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+0152; C; 0153; # LATIN CAPITAL LIGATURE OE
+0154; C; 0155; # LATIN CAPITAL LETTER R WITH ACUTE
+0156; C; 0157; # LATIN CAPITAL LETTER R WITH CEDILLA
+0158; C; 0159; # LATIN CAPITAL LETTER R WITH CARON
+015A; C; 015B; # LATIN CAPITAL LETTER S WITH ACUTE
+015C; C; 015D; # LATIN CAPITAL LETTER S WITH CIRCUMFLEX
+015E; C; 015F; # LATIN CAPITAL LETTER S WITH CEDILLA
+0160; C; 0161; # LATIN CAPITAL LETTER S WITH CARON
+0162; C; 0163; # LATIN CAPITAL LETTER T WITH CEDILLA
+0164; C; 0165; # LATIN CAPITAL LETTER T WITH CARON
+0166; C; 0167; # LATIN CAPITAL LETTER T WITH STROKE
+0168; C; 0169; # LATIN CAPITAL LETTER U WITH TILDE
+016A; C; 016B; # LATIN CAPITAL LETTER U WITH MACRON
+016C; C; 016D; # LATIN CAPITAL LETTER U WITH BREVE
+016E; C; 016F; # LATIN CAPITAL LETTER U WITH RING ABOVE
+0170; C; 0171; # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+0172; C; 0173; # LATIN CAPITAL LETTER U WITH OGONEK
+0174; C; 0175; # LATIN CAPITAL LETTER W WITH CIRCUMFLEX
+0176; C; 0177; # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
+0178; C; 00FF; # LATIN CAPITAL LETTER Y WITH DIAERESIS
+0179; C; 017A; # LATIN CAPITAL LETTER Z WITH ACUTE
+017B; C; 017C; # LATIN CAPITAL LETTER Z WITH DOT ABOVE
+017D; C; 017E; # LATIN CAPITAL LETTER Z WITH CARON
+017F; C; 0073; # LATIN SMALL LETTER LONG S
+0181; C; 0253; # LATIN CAPITAL LETTER B WITH HOOK
+0182; C; 0183; # LATIN CAPITAL LETTER B WITH TOPBAR
+0184; C; 0185; # LATIN CAPITAL LETTER TONE SIX
+0186; C; 0254; # LATIN CAPITAL LETTER OPEN O
+0187; C; 0188; # LATIN CAPITAL LETTER C WITH HOOK
+0189; C; 0256; # LATIN CAPITAL LETTER AFRICAN D
+018A; C; 0257; # LATIN CAPITAL LETTER D WITH HOOK
+018B; C; 018C; # LATIN CAPITAL LETTER D WITH TOPBAR
+018E; C; 01DD; # LATIN CAPITAL LETTER REVERSED E
+018F; C; 0259; # LATIN CAPITAL LETTER SCHWA
+0190; C; 025B; # LATIN CAPITAL LETTER OPEN E
+0191; C; 0192; # LATIN CAPITAL LETTER F WITH HOOK
+0193; C; 0260; # LATIN CAPITAL LETTER G WITH HOOK
+0194; C; 0263; # LATIN CAPITAL LETTER GAMMA
+0196; C; 0269; # LATIN CAPITAL LETTER IOTA
+0197; C; 0268; # LATIN CAPITAL LETTER I WITH STROKE
+0198; C; 0199; # LATIN CAPITAL LETTER K WITH HOOK
+019C; C; 026F; # LATIN CAPITAL LETTER TURNED M
+019D; C; 0272; # LATIN CAPITAL LETTER N WITH LEFT HOOK
+019F; C; 0275; # LATIN CAPITAL LETTER O WITH MIDDLE TILDE
+01A0; C; 01A1; # LATIN CAPITAL LETTER O WITH HORN
+01A2; C; 01A3; # LATIN CAPITAL LETTER OI
+01A4; C; 01A5; # LATIN CAPITAL LETTER P WITH HOOK
+01A6; C; 0280; # LATIN LETTER YR
+01A7; C; 01A8; # LATIN CAPITAL LETTER TONE TWO
+01A9; C; 0283; # LATIN CAPITAL LETTER ESH
+01AC; C; 01AD; # LATIN CAPITAL LETTER T WITH HOOK
+01AE; C; 0288; # LATIN CAPITAL LETTER T WITH RETROFLEX HOOK
+01AF; C; 01B0; # LATIN CAPITAL LETTER U WITH HORN
+01B1; C; 028A; # LATIN CAPITAL LETTER UPSILON
+01B2; C; 028B; # LATIN CAPITAL LETTER V WITH HOOK
+01B3; C; 01B4; # LATIN CAPITAL LETTER Y WITH HOOK
+01B5; C; 01B6; # LATIN CAPITAL LETTER Z WITH STROKE
+01B7; C; 0292; # LATIN CAPITAL LETTER EZH
+01B8; C; 01B9; # LATIN CAPITAL LETTER EZH REVERSED
+01BC; C; 01BD; # LATIN CAPITAL LETTER TONE FIVE
+01C4; C; 01C6; # LATIN CAPITAL LETTER DZ WITH CARON
+01C5; C; 01C6; # LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON
+01C7; C; 01C9; # LATIN CAPITAL LETTER LJ
+01C8; C; 01C9; # LATIN CAPITAL LETTER L WITH SMALL LETTER J
+01CA; C; 01CC; # LATIN CAPITAL LETTER NJ
+01CB; C; 01CC; # LATIN CAPITAL LETTER N WITH SMALL LETTER J
+01CD; C; 01CE; # LATIN CAPITAL LETTER A WITH CARON
+01CF; C; 01D0; # LATIN CAPITAL LETTER I WITH CARON
+01D1; C; 01D2; # LATIN CAPITAL LETTER O WITH CARON
+01D3; C; 01D4; # LATIN CAPITAL LETTER U WITH CARON
+01D5; C; 01D6; # LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON
+01D7; C; 01D8; # LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE
+01D9; C; 01DA; # LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON
+01DB; C; 01DC; # LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE
+01DE; C; 01DF; # LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON
+01E0; C; 01E1; # LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON
+01E2; C; 01E3; # LATIN CAPITAL LETTER AE WITH MACRON
+01E4; C; 01E5; # LATIN CAPITAL LETTER G WITH STROKE
+01E6; C; 01E7; # LATIN CAPITAL LETTER G WITH CARON
+01E8; C; 01E9; # LATIN CAPITAL LETTER K WITH CARON
+01EA; C; 01EB; # LATIN CAPITAL LETTER O WITH OGONEK
+01EC; C; 01ED; # LATIN CAPITAL LETTER O WITH OGONEK AND MACRON
+01EE; C; 01EF; # LATIN CAPITAL LETTER EZH WITH CARON
+01F0; F; 006A 030C; # LATIN SMALL LETTER J WITH CARON
+01F1; C; 01F3; # LATIN CAPITAL LETTER DZ
+01F2; C; 01F3; # LATIN CAPITAL LETTER D WITH SMALL LETTER Z
+01F4; C; 01F5; # LATIN CAPITAL LETTER G WITH ACUTE
+01F6; C; 0195; # LATIN CAPITAL LETTER HWAIR
+01F7; C; 01BF; # LATIN CAPITAL LETTER WYNN
+01F8; C; 01F9; # LATIN CAPITAL LETTER N WITH GRAVE
+01FA; C; 01FB; # LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
+01FC; C; 01FD; # LATIN CAPITAL LETTER AE WITH ACUTE
+01FE; C; 01FF; # LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
+0200; C; 0201; # LATIN CAPITAL LETTER A WITH DOUBLE GRAVE
+0202; C; 0203; # LATIN CAPITAL LETTER A WITH INVERTED BREVE
+0204; C; 0205; # LATIN CAPITAL LETTER E WITH DOUBLE GRAVE
+0206; C; 0207; # LATIN CAPITAL LETTER E WITH INVERTED BREVE
+0208; C; 0209; # LATIN CAPITAL LETTER I WITH DOUBLE GRAVE
+020A; C; 020B; # LATIN CAPITAL LETTER I WITH INVERTED BREVE
+020C; C; 020D; # LATIN CAPITAL LETTER O WITH DOUBLE GRAVE
+020E; C; 020F; # LATIN CAPITAL LETTER O WITH INVERTED BREVE
+0210; C; 0211; # LATIN CAPITAL LETTER R WITH DOUBLE GRAVE
+0212; C; 0213; # LATIN CAPITAL LETTER R WITH INVERTED BREVE
+0214; C; 0215; # LATIN CAPITAL LETTER U WITH DOUBLE GRAVE
+0216; C; 0217; # LATIN CAPITAL LETTER U WITH INVERTED BREVE
+0218; C; 0219; # LATIN CAPITAL LETTER S WITH COMMA BELOW
+021A; C; 021B; # LATIN CAPITAL LETTER T WITH COMMA BELOW
+021C; C; 021D; # LATIN CAPITAL LETTER YOGH
+021E; C; 021F; # LATIN CAPITAL LETTER H WITH CARON
+0220; C; 019E; # LATIN CAPITAL LETTER N WITH LONG RIGHT LEG
+0222; C; 0223; # LATIN CAPITAL LETTER OU
+0224; C; 0225; # LATIN CAPITAL LETTER Z WITH HOOK
+0226; C; 0227; # LATIN CAPITAL LETTER A WITH DOT ABOVE
+0228; C; 0229; # LATIN CAPITAL LETTER E WITH CEDILLA
+022A; C; 022B; # LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON
+022C; C; 022D; # LATIN CAPITAL LETTER O WITH TILDE AND MACRON
+022E; C; 022F; # LATIN CAPITAL LETTER O WITH DOT ABOVE
+0230; C; 0231; # LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON
+0232; C; 0233; # LATIN CAPITAL LETTER Y WITH MACRON
+0345; C; 03B9; # COMBINING GREEK YPOGEGRAMMENI
+0386; C; 03AC; # GREEK CAPITAL LETTER ALPHA WITH TONOS
+0388; C; 03AD; # GREEK CAPITAL LETTER EPSILON WITH TONOS
+0389; C; 03AE; # GREEK CAPITAL LETTER ETA WITH TONOS
+038A; C; 03AF; # GREEK CAPITAL LETTER IOTA WITH TONOS
+038C; C; 03CC; # GREEK CAPITAL LETTER OMICRON WITH TONOS
+038E; C; 03CD; # GREEK CAPITAL LETTER UPSILON WITH TONOS
+038F; C; 03CE; # GREEK CAPITAL LETTER OMEGA WITH TONOS
+0390; F; 03B9 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+0391; C; 03B1; # GREEK CAPITAL LETTER ALPHA
+0392; C; 03B2; # GREEK CAPITAL LETTER BETA
+0393; C; 03B3; # GREEK CAPITAL LETTER GAMMA
+0394; C; 03B4; # GREEK CAPITAL LETTER DELTA
+0395; C; 03B5; # GREEK CAPITAL LETTER EPSILON
+0396; C; 03B6; # GREEK CAPITAL LETTER ZETA
+0397; C; 03B7; # GREEK CAPITAL LETTER ETA
+0398; C; 03B8; # GREEK CAPITAL LETTER THETA
+0399; C; 03B9; # GREEK CAPITAL LETTER IOTA
+039A; C; 03BA; # GREEK CAPITAL LETTER KAPPA
+039B; C; 03BB; # GREEK CAPITAL LETTER LAMDA
+039C; C; 03BC; # GREEK CAPITAL LETTER MU
+039D; C; 03BD; # GREEK CAPITAL LETTER NU
+039E; C; 03BE; # GREEK CAPITAL LETTER XI
+039F; C; 03BF; # GREEK CAPITAL LETTER OMICRON
+03A0; C; 03C0; # GREEK CAPITAL LETTER PI
+03A1; C; 03C1; # GREEK CAPITAL LETTER RHO
+03A3; C; 03C3; # GREEK CAPITAL LETTER SIGMA
+03A4; C; 03C4; # GREEK CAPITAL LETTER TAU
+03A5; C; 03C5; # GREEK CAPITAL LETTER UPSILON
+03A6; C; 03C6; # GREEK CAPITAL LETTER PHI
+03A7; C; 03C7; # GREEK CAPITAL LETTER CHI
+03A8; C; 03C8; # GREEK CAPITAL LETTER PSI
+03A9; C; 03C9; # GREEK CAPITAL LETTER OMEGA
+03AA; C; 03CA; # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+03AB; C; 03CB; # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+03B0; F; 03C5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+03C2; C; 03C3; # GREEK SMALL LETTER FINAL SIGMA
+03D0; C; 03B2; # GREEK BETA SYMBOL
+03D1; C; 03B8; # GREEK THETA SYMBOL
+03D5; C; 03C6; # GREEK PHI SYMBOL
+03D6; C; 03C0; # GREEK PI SYMBOL
+03D8; C; 03D9; # GREEK LETTER ARCHAIC KOPPA
+03DA; C; 03DB; # GREEK LETTER STIGMA
+03DC; C; 03DD; # GREEK LETTER DIGAMMA
+03DE; C; 03DF; # GREEK LETTER KOPPA
+03E0; C; 03E1; # GREEK LETTER SAMPI
+03E2; C; 03E3; # COPTIC CAPITAL LETTER SHEI
+03E4; C; 03E5; # COPTIC CAPITAL LETTER FEI
+03E6; C; 03E7; # COPTIC CAPITAL LETTER KHEI
+03E8; C; 03E9; # COPTIC CAPITAL LETTER HORI
+03EA; C; 03EB; # COPTIC CAPITAL LETTER GANGIA
+03EC; C; 03ED; # COPTIC CAPITAL LETTER SHIMA
+03EE; C; 03EF; # COPTIC CAPITAL LETTER DEI
+03F0; C; 03BA; # GREEK KAPPA SYMBOL
+03F1; C; 03C1; # GREEK RHO SYMBOL
+03F2; C; 03C3; # GREEK LUNATE SIGMA SYMBOL
+03F4; C; 03B8; # GREEK CAPITAL THETA SYMBOL
+03F5; C; 03B5; # GREEK LUNATE EPSILON SYMBOL
+0400; C; 0450; # CYRILLIC CAPITAL LETTER IE WITH GRAVE
+0401; C; 0451; # CYRILLIC CAPITAL LETTER IO
+0402; C; 0452; # CYRILLIC CAPITAL LETTER DJE
+0403; C; 0453; # CYRILLIC CAPITAL LETTER GJE
+0404; C; 0454; # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+0405; C; 0455; # CYRILLIC CAPITAL LETTER DZE
+0406; C; 0456; # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+0407; C; 0457; # CYRILLIC CAPITAL LETTER YI
+0408; C; 0458; # CYRILLIC CAPITAL LETTER JE
+0409; C; 0459; # CYRILLIC CAPITAL LETTER LJE
+040A; C; 045A; # CYRILLIC CAPITAL LETTER NJE
+040B; C; 045B; # CYRILLIC CAPITAL LETTER TSHE
+040C; C; 045C; # CYRILLIC CAPITAL LETTER KJE
+040D; C; 045D; # CYRILLIC CAPITAL LETTER I WITH GRAVE
+040E; C; 045E; # CYRILLIC CAPITAL LETTER SHORT U
+040F; C; 045F; # CYRILLIC CAPITAL LETTER DZHE
+0410; C; 0430; # CYRILLIC CAPITAL LETTER A
+0411; C; 0431; # CYRILLIC CAPITAL LETTER BE
+0412; C; 0432; # CYRILLIC CAPITAL LETTER VE
+0413; C; 0433; # CYRILLIC CAPITAL LETTER GHE
+0414; C; 0434; # CYRILLIC CAPITAL LETTER DE
+0415; C; 0435; # CYRILLIC CAPITAL LETTER IE
+0416; C; 0436; # CYRILLIC CAPITAL LETTER ZHE
+0417; C; 0437; # CYRILLIC CAPITAL LETTER ZE
+0418; C; 0438; # CYRILLIC CAPITAL LETTER I
+0419; C; 0439; # CYRILLIC CAPITAL LETTER SHORT I
+041A; C; 043A; # CYRILLIC CAPITAL LETTER KA
+041B; C; 043B; # CYRILLIC CAPITAL LETTER EL
+041C; C; 043C; # CYRILLIC CAPITAL LETTER EM
+041D; C; 043D; # CYRILLIC CAPITAL LETTER EN
+041E; C; 043E; # CYRILLIC CAPITAL LETTER O
+041F; C; 043F; # CYRILLIC CAPITAL LETTER PE
+0420; C; 0440; # CYRILLIC CAPITAL LETTER ER
+0421; C; 0441; # CYRILLIC CAPITAL LETTER ES
+0422; C; 0442; # CYRILLIC CAPITAL LETTER TE
+0423; C; 0443; # CYRILLIC CAPITAL LETTER U
+0424; C; 0444; # CYRILLIC CAPITAL LETTER EF
+0425; C; 0445; # CYRILLIC CAPITAL LETTER HA
+0426; C; 0446; # CYRILLIC CAPITAL LETTER TSE
+0427; C; 0447; # CYRILLIC CAPITAL LETTER CHE
+0428; C; 0448; # CYRILLIC CAPITAL LETTER SHA
+0429; C; 0449; # CYRILLIC CAPITAL LETTER SHCHA
+042A; C; 044A; # CYRILLIC CAPITAL LETTER HARD SIGN
+042B; C; 044B; # CYRILLIC CAPITAL LETTER YERU
+042C; C; 044C; # CYRILLIC CAPITAL LETTER SOFT SIGN
+042D; C; 044D; # CYRILLIC CAPITAL LETTER E
+042E; C; 044E; # CYRILLIC CAPITAL LETTER YU
+042F; C; 044F; # CYRILLIC CAPITAL LETTER YA
+0460; C; 0461; # CYRILLIC CAPITAL LETTER OMEGA
+0462; C; 0463; # CYRILLIC CAPITAL LETTER YAT
+0464; C; 0465; # CYRILLIC CAPITAL LETTER IOTIFIED E
+0466; C; 0467; # CYRILLIC CAPITAL LETTER LITTLE YUS
+0468; C; 0469; # CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS
+046A; C; 046B; # CYRILLIC CAPITAL LETTER BIG YUS
+046C; C; 046D; # CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS
+046E; C; 046F; # CYRILLIC CAPITAL LETTER KSI
+0470; C; 0471; # CYRILLIC CAPITAL LETTER PSI
+0472; C; 0473; # CYRILLIC CAPITAL LETTER FITA
+0474; C; 0475; # CYRILLIC CAPITAL LETTER IZHITSA
+0476; C; 0477; # CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT
+0478; C; 0479; # CYRILLIC CAPITAL LETTER UK
+047A; C; 047B; # CYRILLIC CAPITAL LETTER ROUND OMEGA
+047C; C; 047D; # CYRILLIC CAPITAL LETTER OMEGA WITH TITLO
+047E; C; 047F; # CYRILLIC CAPITAL LETTER OT
+0480; C; 0481; # CYRILLIC CAPITAL LETTER KOPPA
+048A; C; 048B; # CYRILLIC CAPITAL LETTER SHORT I WITH TAIL
+048C; C; 048D; # CYRILLIC CAPITAL LETTER SEMISOFT SIGN
+048E; C; 048F; # CYRILLIC CAPITAL LETTER ER WITH TICK
+0490; C; 0491; # CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+0492; C; 0493; # CYRILLIC CAPITAL LETTER GHE WITH STROKE
+0494; C; 0495; # CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK
+0496; C; 0497; # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
+0498; C; 0499; # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
+049A; C; 049B; # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
+049C; C; 049D; # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
+049E; C; 049F; # CYRILLIC CAPITAL LETTER KA WITH STROKE
+04A0; C; 04A1; # CYRILLIC CAPITAL LETTER BASHKIR KA
+04A2; C; 04A3; # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
+04A4; C; 04A5; # CYRILLIC CAPITAL LIGATURE EN GHE
+04A6; C; 04A7; # CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK
+04A8; C; 04A9; # CYRILLIC CAPITAL LETTER ABKHASIAN HA
+04AA; C; 04AB; # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
+04AC; C; 04AD; # CYRILLIC CAPITAL LETTER TE WITH DESCENDER
+04AE; C; 04AF; # CYRILLIC CAPITAL LETTER STRAIGHT U
+04B0; C; 04B1; # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
+04B2; C; 04B3; # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
+04B4; C; 04B5; # CYRILLIC CAPITAL LIGATURE TE TSE
+04B6; C; 04B7; # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
+04B8; C; 04B9; # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
+04BA; C; 04BB; # CYRILLIC CAPITAL LETTER SHHA
+04BC; C; 04BD; # CYRILLIC CAPITAL LETTER ABKHASIAN CHE
+04BE; C; 04BF; # CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH DESCENDER
+04C1; C; 04C2; # CYRILLIC CAPITAL LETTER ZHE WITH BREVE
+04C3; C; 04C4; # CYRILLIC CAPITAL LETTER KA WITH HOOK
+04C5; C; 04C6; # CYRILLIC CAPITAL LETTER EL WITH TAIL
+04C7; C; 04C8; # CYRILLIC CAPITAL LETTER EN WITH HOOK
+04C9; C; 04CA; # CYRILLIC CAPITAL LETTER EN WITH TAIL
+04CB; C; 04CC; # CYRILLIC CAPITAL LETTER KHAKASSIAN CHE
+04CD; C; 04CE; # CYRILLIC CAPITAL LETTER EM WITH TAIL
+04D0; C; 04D1; # CYRILLIC CAPITAL LETTER A WITH BREVE
+04D2; C; 04D3; # CYRILLIC CAPITAL LETTER A WITH DIAERESIS
+04D4; C; 04D5; # CYRILLIC CAPITAL LIGATURE A IE
+04D6; C; 04D7; # CYRILLIC CAPITAL LETTER IE WITH BREVE
+04D8; C; 04D9; # CYRILLIC CAPITAL LETTER SCHWA
+04DA; C; 04DB; # CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS
+04DC; C; 04DD; # CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS
+04DE; C; 04DF; # CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS
+04E0; C; 04E1; # CYRILLIC CAPITAL LETTER ABKHASIAN DZE
+04E2; C; 04E3; # CYRILLIC CAPITAL LETTER I WITH MACRON
+04E4; C; 04E5; # CYRILLIC CAPITAL LETTER I WITH DIAERESIS
+04E6; C; 04E7; # CYRILLIC CAPITAL LETTER O WITH DIAERESIS
+04E8; C; 04E9; # CYRILLIC CAPITAL LETTER BARRED O
+04EA; C; 04EB; # CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS
+04EC; C; 04ED; # CYRILLIC CAPITAL LETTER E WITH DIAERESIS
+04EE; C; 04EF; # CYRILLIC CAPITAL LETTER U WITH MACRON
+04F0; C; 04F1; # CYRILLIC CAPITAL LETTER U WITH DIAERESIS
+04F2; C; 04F3; # CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE
+04F4; C; 04F5; # CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS
+04F8; C; 04F9; # CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS
+0500; C; 0501; # CYRILLIC CAPITAL LETTER KOMI DE
+0502; C; 0503; # CYRILLIC CAPITAL LETTER KOMI DJE
+0504; C; 0505; # CYRILLIC CAPITAL LETTER KOMI ZJE
+0506; C; 0507; # CYRILLIC CAPITAL LETTER KOMI DZJE
+0508; C; 0509; # CYRILLIC CAPITAL LETTER KOMI LJE
+050A; C; 050B; # CYRILLIC CAPITAL LETTER KOMI NJE
+050C; C; 050D; # CYRILLIC CAPITAL LETTER KOMI SJE
+050E; C; 050F; # CYRILLIC CAPITAL LETTER KOMI TJE
+0531; C; 0561; # ARMENIAN CAPITAL LETTER AYB
+0532; C; 0562; # ARMENIAN CAPITAL LETTER BEN
+0533; C; 0563; # ARMENIAN CAPITAL LETTER GIM
+0534; C; 0564; # ARMENIAN CAPITAL LETTER DA
+0535; C; 0565; # ARMENIAN CAPITAL LETTER ECH
+0536; C; 0566; # ARMENIAN CAPITAL LETTER ZA
+0537; C; 0567; # ARMENIAN CAPITAL LETTER EH
+0538; C; 0568; # ARMENIAN CAPITAL LETTER ET
+0539; C; 0569; # ARMENIAN CAPITAL LETTER TO
+053A; C; 056A; # ARMENIAN CAPITAL LETTER ZHE
+053B; C; 056B; # ARMENIAN CAPITAL LETTER INI
+053C; C; 056C; # ARMENIAN CAPITAL LETTER LIWN
+053D; C; 056D; # ARMENIAN CAPITAL LETTER XEH
+053E; C; 056E; # ARMENIAN CAPITAL LETTER CA
+053F; C; 056F; # ARMENIAN CAPITAL LETTER KEN
+0540; C; 0570; # ARMENIAN CAPITAL LETTER HO
+0541; C; 0571; # ARMENIAN CAPITAL LETTER JA
+0542; C; 0572; # ARMENIAN CAPITAL LETTER GHAD
+0543; C; 0573; # ARMENIAN CAPITAL LETTER CHEH
+0544; C; 0574; # ARMENIAN CAPITAL LETTER MEN
+0545; C; 0575; # ARMENIAN CAPITAL LETTER YI
+0546; C; 0576; # ARMENIAN CAPITAL LETTER NOW
+0547; C; 0577; # ARMENIAN CAPITAL LETTER SHA
+0548; C; 0578; # ARMENIAN CAPITAL LETTER VO
+0549; C; 0579; # ARMENIAN CAPITAL LETTER CHA
+054A; C; 057A; # ARMENIAN CAPITAL LETTER PEH
+054B; C; 057B; # ARMENIAN CAPITAL LETTER JHEH
+054C; C; 057C; # ARMENIAN CAPITAL LETTER RA
+054D; C; 057D; # ARMENIAN CAPITAL LETTER SEH
+054E; C; 057E; # ARMENIAN CAPITAL LETTER VEW
+054F; C; 057F; # ARMENIAN CAPITAL LETTER TIWN
+0550; C; 0580; # ARMENIAN CAPITAL LETTER REH
+0551; C; 0581; # ARMENIAN CAPITAL LETTER CO
+0552; C; 0582; # ARMENIAN CAPITAL LETTER YIWN
+0553; C; 0583; # ARMENIAN CAPITAL LETTER PIWR
+0554; C; 0584; # ARMENIAN CAPITAL LETTER KEH
+0555; C; 0585; # ARMENIAN CAPITAL LETTER OH
+0556; C; 0586; # ARMENIAN CAPITAL LETTER FEH
+0587; F; 0565 0582; # ARMENIAN SMALL LIGATURE ECH YIWN
+1E00; C; 1E01; # LATIN CAPITAL LETTER A WITH RING BELOW
+1E02; C; 1E03; # LATIN CAPITAL LETTER B WITH DOT ABOVE
+1E04; C; 1E05; # LATIN CAPITAL LETTER B WITH DOT BELOW
+1E06; C; 1E07; # LATIN CAPITAL LETTER B WITH LINE BELOW
+1E08; C; 1E09; # LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE
+1E0A; C; 1E0B; # LATIN CAPITAL LETTER D WITH DOT ABOVE
+1E0C; C; 1E0D; # LATIN CAPITAL LETTER D WITH DOT BELOW
+1E0E; C; 1E0F; # LATIN CAPITAL LETTER D WITH LINE BELOW
+1E10; C; 1E11; # LATIN CAPITAL LETTER D WITH CEDILLA
+1E12; C; 1E13; # LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
+1E14; C; 1E15; # LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
+1E16; C; 1E17; # LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
+1E18; C; 1E19; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
+1E1A; C; 1E1B; # LATIN CAPITAL LETTER E WITH TILDE BELOW
+1E1C; C; 1E1D; # LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE
+1E1E; C; 1E1F; # LATIN CAPITAL LETTER F WITH DOT ABOVE
+1E20; C; 1E21; # LATIN CAPITAL LETTER G WITH MACRON
+1E22; C; 1E23; # LATIN CAPITAL LETTER H WITH DOT ABOVE
+1E24; C; 1E25; # LATIN CAPITAL LETTER H WITH DOT BELOW
+1E26; C; 1E27; # LATIN CAPITAL LETTER H WITH DIAERESIS
+1E28; C; 1E29; # LATIN CAPITAL LETTER H WITH CEDILLA
+1E2A; C; 1E2B; # LATIN CAPITAL LETTER H WITH BREVE BELOW
+1E2C; C; 1E2D; # LATIN CAPITAL LETTER I WITH TILDE BELOW
+1E2E; C; 1E2F; # LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE
+1E30; C; 1E31; # LATIN CAPITAL LETTER K WITH ACUTE
+1E32; C; 1E33; # LATIN CAPITAL LETTER K WITH DOT BELOW
+1E34; C; 1E35; # LATIN CAPITAL LETTER K WITH LINE BELOW
+1E36; C; 1E37; # LATIN CAPITAL LETTER L WITH DOT BELOW
+1E38; C; 1E39; # LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON
+1E3A; C; 1E3B; # LATIN CAPITAL LETTER L WITH LINE BELOW
+1E3C; C; 1E3D; # LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
+1E3E; C; 1E3F; # LATIN CAPITAL LETTER M WITH ACUTE
+1E40; C; 1E41; # LATIN CAPITAL LETTER M WITH DOT ABOVE
+1E42; C; 1E43; # LATIN CAPITAL LETTER M WITH DOT BELOW
+1E44; C; 1E45; # LATIN CAPITAL LETTER N WITH DOT ABOVE
+1E46; C; 1E47; # LATIN CAPITAL LETTER N WITH DOT BELOW
+1E48; C; 1E49; # LATIN CAPITAL LETTER N WITH LINE BELOW
+1E4A; C; 1E4B; # LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
+1E4C; C; 1E4D; # LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
+1E4E; C; 1E4F; # LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS
+1E50; C; 1E51; # LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
+1E52; C; 1E53; # LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
+1E54; C; 1E55; # LATIN CAPITAL LETTER P WITH ACUTE
+1E56; C; 1E57; # LATIN CAPITAL LETTER P WITH DOT ABOVE
+1E58; C; 1E59; # LATIN CAPITAL LETTER R WITH DOT ABOVE
+1E5A; C; 1E5B; # LATIN CAPITAL LETTER R WITH DOT BELOW
+1E5C; C; 1E5D; # LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON
+1E5E; C; 1E5F; # LATIN CAPITAL LETTER R WITH LINE BELOW
+1E60; C; 1E61; # LATIN CAPITAL LETTER S WITH DOT ABOVE
+1E62; C; 1E63; # LATIN CAPITAL LETTER S WITH DOT BELOW
+1E64; C; 1E65; # LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE
+1E66; C; 1E67; # LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE
+1E68; C; 1E69; # LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE
+1E6A; C; 1E6B; # LATIN CAPITAL LETTER T WITH DOT ABOVE
+1E6C; C; 1E6D; # LATIN CAPITAL LETTER T WITH DOT BELOW
+1E6E; C; 1E6F; # LATIN CAPITAL LETTER T WITH LINE BELOW
+1E70; C; 1E71; # LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
+1E72; C; 1E73; # LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
+1E74; C; 1E75; # LATIN CAPITAL LETTER U WITH TILDE BELOW
+1E76; C; 1E77; # LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
+1E78; C; 1E79; # LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
+1E7A; C; 1E7B; # LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS
+1E7C; C; 1E7D; # LATIN CAPITAL LETTER V WITH TILDE
+1E7E; C; 1E7F; # LATIN CAPITAL LETTER V WITH DOT BELOW
+1E80; C; 1E81; # LATIN CAPITAL LETTER W WITH GRAVE
+1E82; C; 1E83; # LATIN CAPITAL LETTER W WITH ACUTE
+1E84; C; 1E85; # LATIN CAPITAL LETTER W WITH DIAERESIS
+1E86; C; 1E87; # LATIN CAPITAL LETTER W WITH DOT ABOVE
+1E88; C; 1E89; # LATIN CAPITAL LETTER W WITH DOT BELOW
+1E8A; C; 1E8B; # LATIN CAPITAL LETTER X WITH DOT ABOVE
+1E8C; C; 1E8D; # LATIN CAPITAL LETTER X WITH DIAERESIS
+1E8E; C; 1E8F; # LATIN CAPITAL LETTER Y WITH DOT ABOVE
+1E90; C; 1E91; # LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
+1E92; C; 1E93; # LATIN CAPITAL LETTER Z WITH DOT BELOW
+1E94; C; 1E95; # LATIN CAPITAL LETTER Z WITH LINE BELOW
+1E96; F; 0068 0331; # LATIN SMALL LETTER H WITH LINE BELOW
+1E97; F; 0074 0308; # LATIN SMALL LETTER T WITH DIAERESIS
+1E98; F; 0077 030A; # LATIN SMALL LETTER W WITH RING ABOVE
+1E99; F; 0079 030A; # LATIN SMALL LETTER Y WITH RING ABOVE
+1E9A; F; 0061 02BE; # LATIN SMALL LETTER A WITH RIGHT HALF RING
+1E9B; C; 1E61; # LATIN SMALL LETTER LONG S WITH DOT ABOVE
+1EA0; C; 1EA1; # LATIN CAPITAL LETTER A WITH DOT BELOW
+1EA2; C; 1EA3; # LATIN CAPITAL LETTER A WITH HOOK ABOVE
+1EA4; C; 1EA5; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE
+1EA6; C; 1EA7; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE
+1EA8; C; 1EA9; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
+1EAA; C; 1EAB; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE
+1EAC; C; 1EAD; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW
+1EAE; C; 1EAF; # LATIN CAPITAL LETTER A WITH BREVE AND ACUTE
+1EB0; C; 1EB1; # LATIN CAPITAL LETTER A WITH BREVE AND GRAVE
+1EB2; C; 1EB3; # LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE
+1EB4; C; 1EB5; # LATIN CAPITAL LETTER A WITH BREVE AND TILDE
+1EB6; C; 1EB7; # LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW
+1EB8; C; 1EB9; # LATIN CAPITAL LETTER E WITH DOT BELOW
+1EBA; C; 1EBB; # LATIN CAPITAL LETTER E WITH HOOK ABOVE
+1EBC; C; 1EBD; # LATIN CAPITAL LETTER E WITH TILDE
+1EBE; C; 1EBF; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE
+1EC0; C; 1EC1; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE
+1EC2; C; 1EC3; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
+1EC4; C; 1EC5; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE
+1EC6; C; 1EC7; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW
+1EC8; C; 1EC9; # LATIN CAPITAL LETTER I WITH HOOK ABOVE
+1ECA; C; 1ECB; # LATIN CAPITAL LETTER I WITH DOT BELOW
+1ECC; C; 1ECD; # LATIN CAPITAL LETTER O WITH DOT BELOW
+1ECE; C; 1ECF; # LATIN CAPITAL LETTER O WITH HOOK ABOVE
+1ED0; C; 1ED1; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE
+1ED2; C; 1ED3; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE
+1ED4; C; 1ED5; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
+1ED6; C; 1ED7; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE
+1ED8; C; 1ED9; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW
+1EDA; C; 1EDB; # LATIN CAPITAL LETTER O WITH HORN AND ACUTE
+1EDC; C; 1EDD; # LATIN CAPITAL LETTER O WITH HORN AND GRAVE
+1EDE; C; 1EDF; # LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE
+1EE0; C; 1EE1; # LATIN CAPITAL LETTER O WITH HORN AND TILDE
+1EE2; C; 1EE3; # LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW
+1EE4; C; 1EE5; # LATIN CAPITAL LETTER U WITH DOT BELOW
+1EE6; C; 1EE7; # LATIN CAPITAL LETTER U WITH HOOK ABOVE
+1EE8; C; 1EE9; # LATIN CAPITAL LETTER U WITH HORN AND ACUTE
+1EEA; C; 1EEB; # LATIN CAPITAL LETTER U WITH HORN AND GRAVE
+1EEC; C; 1EED; # LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE
+1EEE; C; 1EEF; # LATIN CAPITAL LETTER U WITH HORN AND TILDE
+1EF0; C; 1EF1; # LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW
+1EF2; C; 1EF3; # LATIN CAPITAL LETTER Y WITH GRAVE
+1EF4; C; 1EF5; # LATIN CAPITAL LETTER Y WITH DOT BELOW
+1EF6; C; 1EF7; # LATIN CAPITAL LETTER Y WITH HOOK ABOVE
+1EF8; C; 1EF9; # LATIN CAPITAL LETTER Y WITH TILDE
+1F08; C; 1F00; # GREEK CAPITAL LETTER ALPHA WITH PSILI
+1F09; C; 1F01; # GREEK CAPITAL LETTER ALPHA WITH DASIA
+1F0A; C; 1F02; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA
+1F0B; C; 1F03; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA
+1F0C; C; 1F04; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA
+1F0D; C; 1F05; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA
+1F0E; C; 1F06; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI
+1F0F; C; 1F07; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI
+1F18; C; 1F10; # GREEK CAPITAL LETTER EPSILON WITH PSILI
+1F19; C; 1F11; # GREEK CAPITAL LETTER EPSILON WITH DASIA
+1F1A; C; 1F12; # GREEK CAPITAL LETTER EPSILON WITH PSILI AND VARIA
+1F1B; C; 1F13; # GREEK CAPITAL LETTER EPSILON WITH DASIA AND VARIA
+1F1C; C; 1F14; # GREEK CAPITAL LETTER EPSILON WITH PSILI AND OXIA
+1F1D; C; 1F15; # GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA
+1F28; C; 1F20; # GREEK CAPITAL LETTER ETA WITH PSILI
+1F29; C; 1F21; # GREEK CAPITAL LETTER ETA WITH DASIA
+1F2A; C; 1F22; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA
+1F2B; C; 1F23; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA
+1F2C; C; 1F24; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA
+1F2D; C; 1F25; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA
+1F2E; C; 1F26; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI
+1F2F; C; 1F27; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI
+1F38; C; 1F30; # GREEK CAPITAL LETTER IOTA WITH PSILI
+1F39; C; 1F31; # GREEK CAPITAL LETTER IOTA WITH DASIA
+1F3A; C; 1F32; # GREEK CAPITAL LETTER IOTA WITH PSILI AND VARIA
+1F3B; C; 1F33; # GREEK CAPITAL LETTER IOTA WITH DASIA AND VARIA
+1F3C; C; 1F34; # GREEK CAPITAL LETTER IOTA WITH PSILI AND OXIA
+1F3D; C; 1F35; # GREEK CAPITAL LETTER IOTA WITH DASIA AND OXIA
+1F3E; C; 1F36; # GREEK CAPITAL LETTER IOTA WITH PSILI AND PERISPOMENI
+1F3F; C; 1F37; # GREEK CAPITAL LETTER IOTA WITH DASIA AND PERISPOMENI
+1F48; C; 1F40; # GREEK CAPITAL LETTER OMICRON WITH PSILI
+1F49; C; 1F41; # GREEK CAPITAL LETTER OMICRON WITH DASIA
+1F4A; C; 1F42; # GREEK CAPITAL LETTER OMICRON WITH PSILI AND VARIA
+1F4B; C; 1F43; # GREEK CAPITAL LETTER OMICRON WITH DASIA AND VARIA
+1F4C; C; 1F44; # GREEK CAPITAL LETTER OMICRON WITH PSILI AND OXIA
+1F4D; C; 1F45; # GREEK CAPITAL LETTER OMICRON WITH DASIA AND OXIA
+1F50; F; 03C5 0313; # GREEK SMALL LETTER UPSILON WITH PSILI
+1F52; F; 03C5 0313 0300; # GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA
+1F54; F; 03C5 0313 0301; # GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA
+1F56; F; 03C5 0313 0342; # GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI
+1F59; C; 1F51; # GREEK CAPITAL LETTER UPSILON WITH DASIA
+1F5B; C; 1F53; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND VARIA
+1F5D; C; 1F55; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND OXIA
+1F5F; C; 1F57; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND PERISPOMENI
+1F68; C; 1F60; # GREEK CAPITAL LETTER OMEGA WITH PSILI
+1F69; C; 1F61; # GREEK CAPITAL LETTER OMEGA WITH DASIA
+1F6A; C; 1F62; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA
+1F6B; C; 1F63; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA
+1F6C; C; 1F64; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA
+1F6D; C; 1F65; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA
+1F6E; C; 1F66; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI
+1F6F; C; 1F67; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI
+1F80; F; 1F00 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI
+1F81; F; 1F01 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI
+1F82; F; 1F02 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1F83; F; 1F03 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1F84; F; 1F04 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1F85; F; 1F05 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1F86; F; 1F06 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1F87; F; 1F07 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1F88; F; 1F00 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI
+1F88; S; 1F80; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI
+1F89; F; 1F01 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI
+1F89; S; 1F81; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI
+1F8A; F; 1F02 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F8A; S; 1F82; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F8B; F; 1F03 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F8B; S; 1F83; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F8C; F; 1F04 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F8C; S; 1F84; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F8D; F; 1F05 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F8D; S; 1F85; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F8E; F; 1F06 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F8E; S; 1F86; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F8F; F; 1F07 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F8F; S; 1F87; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F90; F; 1F20 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI
+1F91; F; 1F21 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI
+1F92; F; 1F22 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1F93; F; 1F23 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1F94; F; 1F24 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1F95; F; 1F25 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1F96; F; 1F26 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1F97; F; 1F27 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1F98; F; 1F20 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI
+1F98; S; 1F90; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI
+1F99; F; 1F21 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI
+1F99; S; 1F91; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI
+1F9A; F; 1F22 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F9A; S; 1F92; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F9B; F; 1F23 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F9B; S; 1F93; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F9C; F; 1F24 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F9C; S; 1F94; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F9D; F; 1F25 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F9D; S; 1F95; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F9E; F; 1F26 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F9E; S; 1F96; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F9F; F; 1F27 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F9F; S; 1F97; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FA0; F; 1F60 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI
+1FA1; F; 1F61 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI
+1FA2; F; 1F62 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1FA3; F; 1F63 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1FA4; F; 1F64 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1FA5; F; 1F65 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1FA6; F; 1F66 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1FA7; F; 1F67 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1FA8; F; 1F60 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI
+1FA8; S; 1FA0; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI
+1FA9; F; 1F61 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI
+1FA9; S; 1FA1; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI
+1FAA; F; 1F62 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1FAA; S; 1FA2; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1FAB; F; 1F63 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1FAB; S; 1FA3; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1FAC; F; 1F64 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1FAC; S; 1FA4; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1FAD; F; 1F65 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1FAD; S; 1FA5; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1FAE; F; 1F66 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1FAE; S; 1FA6; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1FAF; F; 1F67 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FAF; S; 1FA7; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FB2; F; 1F70 03B9; # GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI
+1FB3; F; 03B1 03B9; # GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI
+1FB4; F; 03AC 03B9; # GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI
+1FB6; F; 03B1 0342; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI
+1FB7; F; 03B1 0342 03B9; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FB8; C; 1FB0; # GREEK CAPITAL LETTER ALPHA WITH VRACHY
+1FB9; C; 1FB1; # GREEK CAPITAL LETTER ALPHA WITH MACRON
+1FBA; C; 1F70; # GREEK CAPITAL LETTER ALPHA WITH VARIA
+1FBB; C; 1F71; # GREEK CAPITAL LETTER ALPHA WITH OXIA
+1FBC; F; 03B1 03B9; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI
+1FBC; S; 1FB3; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI
+1FBE; C; 03B9; # GREEK PROSGEGRAMMENI
+1FC2; F; 1F74 03B9; # GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI
+1FC3; F; 03B7 03B9; # GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI
+1FC4; F; 03AE 03B9; # GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI
+1FC6; F; 03B7 0342; # GREEK SMALL LETTER ETA WITH PERISPOMENI
+1FC7; F; 03B7 0342 03B9; # GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FC8; C; 1F72; # GREEK CAPITAL LETTER EPSILON WITH VARIA
+1FC9; C; 1F73; # GREEK CAPITAL LETTER EPSILON WITH OXIA
+1FCA; C; 1F74; # GREEK CAPITAL LETTER ETA WITH VARIA
+1FCB; C; 1F75; # GREEK CAPITAL LETTER ETA WITH OXIA
+1FCC; F; 03B7 03B9; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI
+1FCC; S; 1FC3; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI
+1FD2; F; 03B9 0308 0300; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA
+1FD3; F; 03B9 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA
+1FD6; F; 03B9 0342; # GREEK SMALL LETTER IOTA WITH PERISPOMENI
+1FD7; F; 03B9 0308 0342; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI
+1FD8; C; 1FD0; # GREEK CAPITAL LETTER IOTA WITH VRACHY
+1FD9; C; 1FD1; # GREEK CAPITAL LETTER IOTA WITH MACRON
+1FDA; C; 1F76; # GREEK CAPITAL LETTER IOTA WITH VARIA
+1FDB; C; 1F77; # GREEK CAPITAL LETTER IOTA WITH OXIA
+1FE2; F; 03C5 0308 0300; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA
+1FE3; F; 03C5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA
+1FE4; F; 03C1 0313; # GREEK SMALL LETTER RHO WITH PSILI
+1FE6; F; 03C5 0342; # GREEK SMALL LETTER UPSILON WITH PERISPOMENI
+1FE7; F; 03C5 0308 0342; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI
+1FE8; C; 1FE0; # GREEK CAPITAL LETTER UPSILON WITH VRACHY
+1FE9; C; 1FE1; # GREEK CAPITAL LETTER UPSILON WITH MACRON
+1FEA; C; 1F7A; # GREEK CAPITAL LETTER UPSILON WITH VARIA
+1FEB; C; 1F7B; # GREEK CAPITAL LETTER UPSILON WITH OXIA
+1FEC; C; 1FE5; # GREEK CAPITAL LETTER RHO WITH DASIA
+1FF2; F; 1F7C 03B9; # GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI
+1FF3; F; 03C9 03B9; # GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI
+1FF4; F; 03CE 03B9; # GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI
+1FF6; F; 03C9 0342; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI
+1FF7; F; 03C9 0342 03B9; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FF8; C; 1F78; # GREEK CAPITAL LETTER OMICRON WITH VARIA
+1FF9; C; 1F79; # GREEK CAPITAL LETTER OMICRON WITH OXIA
+1FFA; C; 1F7C; # GREEK CAPITAL LETTER OMEGA WITH VARIA
+1FFB; C; 1F7D; # GREEK CAPITAL LETTER OMEGA WITH OXIA
+1FFC; F; 03C9 03B9; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI
+1FFC; S; 1FF3; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI
+2126; C; 03C9; # OHM SIGN
+212A; C; 006B; # KELVIN SIGN
+212B; C; 00E5; # ANGSTROM SIGN
+2160; C; 2170; # ROMAN NUMERAL ONE
+2161; C; 2171; # ROMAN NUMERAL TWO
+2162; C; 2172; # ROMAN NUMERAL THREE
+2163; C; 2173; # ROMAN NUMERAL FOUR
+2164; C; 2174; # ROMAN NUMERAL FIVE
+2165; C; 2175; # ROMAN NUMERAL SIX
+2166; C; 2176; # ROMAN NUMERAL SEVEN
+2167; C; 2177; # ROMAN NUMERAL EIGHT
+2168; C; 2178; # ROMAN NUMERAL NINE
+2169; C; 2179; # ROMAN NUMERAL TEN
+216A; C; 217A; # ROMAN NUMERAL ELEVEN
+216B; C; 217B; # ROMAN NUMERAL TWELVE
+216C; C; 217C; # ROMAN NUMERAL FIFTY
+216D; C; 217D; # ROMAN NUMERAL ONE HUNDRED
+216E; C; 217E; # ROMAN NUMERAL FIVE HUNDRED
+216F; C; 217F; # ROMAN NUMERAL ONE THOUSAND
+24B6; C; 24D0; # CIRCLED LATIN CAPITAL LETTER A
+24B7; C; 24D1; # CIRCLED LATIN CAPITAL LETTER B
+24B8; C; 24D2; # CIRCLED LATIN CAPITAL LETTER C
+24B9; C; 24D3; # CIRCLED LATIN CAPITAL LETTER D
+24BA; C; 24D4; # CIRCLED LATIN CAPITAL LETTER E
+24BB; C; 24D5; # CIRCLED LATIN CAPITAL LETTER F
+24BC; C; 24D6; # CIRCLED LATIN CAPITAL LETTER G
+24BD; C; 24D7; # CIRCLED LATIN CAPITAL LETTER H
+24BE; C; 24D8; # CIRCLED LATIN CAPITAL LETTER I
+24BF; C; 24D9; # CIRCLED LATIN CAPITAL LETTER J
+24C0; C; 24DA; # CIRCLED LATIN CAPITAL LETTER K
+24C1; C; 24DB; # CIRCLED LATIN CAPITAL LETTER L
+24C2; C; 24DC; # CIRCLED LATIN CAPITAL LETTER M
+24C3; C; 24DD; # CIRCLED LATIN CAPITAL LETTER N
+24C4; C; 24DE; # CIRCLED LATIN CAPITAL LETTER O
+24C5; C; 24DF; # CIRCLED LATIN CAPITAL LETTER P
+24C6; C; 24E0; # CIRCLED LATIN CAPITAL LETTER Q
+24C7; C; 24E1; # CIRCLED LATIN CAPITAL LETTER R
+24C8; C; 24E2; # CIRCLED LATIN CAPITAL LETTER S
+24C9; C; 24E3; # CIRCLED LATIN CAPITAL LETTER T
+24CA; C; 24E4; # CIRCLED LATIN CAPITAL LETTER U
+24CB; C; 24E5; # CIRCLED LATIN CAPITAL LETTER V
+24CC; C; 24E6; # CIRCLED LATIN CAPITAL LETTER W
+24CD; C; 24E7; # CIRCLED LATIN CAPITAL LETTER X
+24CE; C; 24E8; # CIRCLED LATIN CAPITAL LETTER Y
+24CF; C; 24E9; # CIRCLED LATIN CAPITAL LETTER Z
+FB00; F; 0066 0066; # LATIN SMALL LIGATURE FF
+FB01; F; 0066 0069; # LATIN SMALL LIGATURE FI
+FB02; F; 0066 006C; # LATIN SMALL LIGATURE FL
+FB03; F; 0066 0066 0069; # LATIN SMALL LIGATURE FFI
+FB04; F; 0066 0066 006C; # LATIN SMALL LIGATURE FFL
+FB05; F; 0073 0074; # LATIN SMALL LIGATURE LONG S T
+FB06; F; 0073 0074; # LATIN SMALL LIGATURE ST
+FB13; F; 0574 0576; # ARMENIAN SMALL LIGATURE MEN NOW
+FB14; F; 0574 0565; # ARMENIAN SMALL LIGATURE MEN ECH
+FB15; F; 0574 056B; # ARMENIAN SMALL LIGATURE MEN INI
+FB16; F; 057E 0576; # ARMENIAN SMALL LIGATURE VEW NOW
+FB17; F; 0574 056D; # ARMENIAN SMALL LIGATURE MEN XEH
+FF21; C; FF41; # FULLWIDTH LATIN CAPITAL LETTER A
+FF22; C; FF42; # FULLWIDTH LATIN CAPITAL LETTER B
+FF23; C; FF43; # FULLWIDTH LATIN CAPITAL LETTER C
+FF24; C; FF44; # FULLWIDTH LATIN CAPITAL LETTER D
+FF25; C; FF45; # FULLWIDTH LATIN CAPITAL LETTER E
+FF26; C; FF46; # FULLWIDTH LATIN CAPITAL LETTER F
+FF27; C; FF47; # FULLWIDTH LATIN CAPITAL LETTER G
+FF28; C; FF48; # FULLWIDTH LATIN CAPITAL LETTER H
+FF29; C; FF49; # FULLWIDTH LATIN CAPITAL LETTER I
+FF2A; C; FF4A; # FULLWIDTH LATIN CAPITAL LETTER J
+FF2B; C; FF4B; # FULLWIDTH LATIN CAPITAL LETTER K
+FF2C; C; FF4C; # FULLWIDTH LATIN CAPITAL LETTER L
+FF2D; C; FF4D; # FULLWIDTH LATIN CAPITAL LETTER M
+FF2E; C; FF4E; # FULLWIDTH LATIN CAPITAL LETTER N
+FF2F; C; FF4F; # FULLWIDTH LATIN CAPITAL LETTER O
+FF30; C; FF50; # FULLWIDTH LATIN CAPITAL LETTER P
+FF31; C; FF51; # FULLWIDTH LATIN CAPITAL LETTER Q
+FF32; C; FF52; # FULLWIDTH LATIN CAPITAL LETTER R
+FF33; C; FF53; # FULLWIDTH LATIN CAPITAL LETTER S
+FF34; C; FF54; # FULLWIDTH LATIN CAPITAL LETTER T
+FF35; C; FF55; # FULLWIDTH LATIN CAPITAL LETTER U
+FF36; C; FF56; # FULLWIDTH LATIN CAPITAL LETTER V
+FF37; C; FF57; # FULLWIDTH LATIN CAPITAL LETTER W
+FF38; C; FF58; # FULLWIDTH LATIN CAPITAL LETTER X
+FF39; C; FF59; # FULLWIDTH LATIN CAPITAL LETTER Y
+FF3A; C; FF5A; # FULLWIDTH LATIN CAPITAL LETTER Z
+10400; C; 10428; # DESERET CAPITAL LETTER LONG I
+10401; C; 10429; # DESERET CAPITAL LETTER LONG E
+10402; C; 1042A; # DESERET CAPITAL LETTER LONG A
+10403; C; 1042B; # DESERET CAPITAL LETTER LONG AH
+10404; C; 1042C; # DESERET CAPITAL LETTER LONG O
+10405; C; 1042D; # DESERET CAPITAL LETTER LONG OO
+10406; C; 1042E; # DESERET CAPITAL LETTER SHORT I
+10407; C; 1042F; # DESERET CAPITAL LETTER SHORT E
+10408; C; 10430; # DESERET CAPITAL LETTER SHORT A
+10409; C; 10431; # DESERET CAPITAL LETTER SHORT AH
+1040A; C; 10432; # DESERET CAPITAL LETTER SHORT O
+1040B; C; 10433; # DESERET CAPITAL LETTER SHORT OO
+1040C; C; 10434; # DESERET CAPITAL LETTER AY
+1040D; C; 10435; # DESERET CAPITAL LETTER OW
+1040E; C; 10436; # DESERET CAPITAL LETTER WU
+1040F; C; 10437; # DESERET CAPITAL LETTER YEE
+10410; C; 10438; # DESERET CAPITAL LETTER H
+10411; C; 10439; # DESERET CAPITAL LETTER PEE
+10412; C; 1043A; # DESERET CAPITAL LETTER BEE
+10413; C; 1043B; # DESERET CAPITAL LETTER TEE
+10414; C; 1043C; # DESERET CAPITAL LETTER DEE
+10415; C; 1043D; # DESERET CAPITAL LETTER CHEE
+10416; C; 1043E; # DESERET CAPITAL LETTER JEE
+10417; C; 1043F; # DESERET CAPITAL LETTER KAY
+10418; C; 10440; # DESERET CAPITAL LETTER GAY
+10419; C; 10441; # DESERET CAPITAL LETTER EF
+1041A; C; 10442; # DESERET CAPITAL LETTER VEE
+1041B; C; 10443; # DESERET CAPITAL LETTER ETH
+1041C; C; 10444; # DESERET CAPITAL LETTER THEE
+1041D; C; 10445; # DESERET CAPITAL LETTER ES
+1041E; C; 10446; # DESERET CAPITAL LETTER ZEE
+1041F; C; 10447; # DESERET CAPITAL LETTER ESH
+10420; C; 10448; # DESERET CAPITAL LETTER ZHEE
+10421; C; 10449; # DESERET CAPITAL LETTER ER
+10422; C; 1044A; # DESERET CAPITAL LETTER EL
+10423; C; 1044B; # DESERET CAPITAL LETTER EM
+10424; C; 1044C; # DESERET CAPITAL LETTER EN
+10425; C; 1044D; # DESERET CAPITAL LETTER ENG
diff --git a/rpython/rlib/unicodedata/CaseFolding-5.2.0.txt b/rpython/rlib/unicodedata/CaseFolding-5.2.0.txt
new file mode 100644
--- /dev/null
+++ b/rpython/rlib/unicodedata/CaseFolding-5.2.0.txt
@@ -0,0 +1,1202 @@
+# CaseFolding-5.2.0.txt
+# Date: 2009-05-28, 23:02:34 GMT [MD]
+#
+# Unicode Character Database
+# Copyright (c) 1991-2009 Unicode, Inc.
+# For terms of use, see http://www.unicode.org/terms_of_use.html
+# For documentation, see http://www.unicode.org/reports/tr44/
+#
+# Case Folding Properties
+#
+# This file is a supplement to the UnicodeData file.
+# It provides a case folding mapping generated from the Unicode Character Database.
+# If all characters are mapped according to the full mapping below, then
+# case differences (according to UnicodeData.txt and SpecialCasing.txt)
+# are eliminated.
+#
+# The data supports both implementations that require simple case foldings
+# (where string lengths don't change), and implementations that allow full case folding
+# (where string lengths may grow). Note that where they can be supported, the
+# full case foldings are superior: for example, they allow "MASSE" and "Maße" to match.
+#
+# All code points not listed in this file map to themselves.
+#
+# NOTE: case folding does not preserve normalization formats!
+#
+# For information on case folding, including how to have case folding
+# preserve normalization formats, see Section 3.13 Default Case Algorithms in
+# The Unicode Standard, Version 5.0.
+#
+# ================================================================================
+# Format
+# ================================================================================
+# The entries in this file are in the following machine-readable format:
+#
+# ; ; ; #
+#
+# The status field is:
+# C: common case folding, common mappings shared by both simple and full mappings.
+# F: full case folding, mappings that cause strings to grow in length. Multiple characters are separated by spaces.
+# S: simple case folding, mappings to single characters where different from F.
+# T: special case for uppercase I and dotted uppercase I
+# - For non-Turkic languages, this mapping is normally not used.
+# - For Turkic languages (tr, az), this mapping can be used instead of the normal mapping for these characters.
+# Note that the Turkic mappings do not maintain canonical equivalence without additional processing.
+# See the discussions of case mapping in the Unicode Standard for more information.
+#
+# Usage:
+# A. To do a simple case folding, use the mappings with status C + S.
+# B. To do a full case folding, use the mappings with status C + F.
+#
+# The mappings with status T can be used or omitted depending on the desired case-folding
+# behavior. (The default option is to exclude them.)
+#
+# =================================================================
+# @missing 0000..10FFFF;
+0041; C; 0061; # LATIN CAPITAL LETTER A
+0042; C; 0062; # LATIN CAPITAL LETTER B
+0043; C; 0063; # LATIN CAPITAL LETTER C
+0044; C; 0064; # LATIN CAPITAL LETTER D
+0045; C; 0065; # LATIN CAPITAL LETTER E
+0046; C; 0066; # LATIN CAPITAL LETTER F
+0047; C; 0067; # LATIN CAPITAL LETTER G
+0048; C; 0068; # LATIN CAPITAL LETTER H
+0049; C; 0069; # LATIN CAPITAL LETTER I
+0049; T; 0131; # LATIN CAPITAL LETTER I
+004A; C; 006A; # LATIN CAPITAL LETTER J
+004B; C; 006B; # LATIN CAPITAL LETTER K
+004C; C; 006C; # LATIN CAPITAL LETTER L
+004D; C; 006D; # LATIN CAPITAL LETTER M
+004E; C; 006E; # LATIN CAPITAL LETTER N
+004F; C; 006F; # LATIN CAPITAL LETTER O
+0050; C; 0070; # LATIN CAPITAL LETTER P
+0051; C; 0071; # LATIN CAPITAL LETTER Q
+0052; C; 0072; # LATIN CAPITAL LETTER R
+0053; C; 0073; # LATIN CAPITAL LETTER S
+0054; C; 0074; # LATIN CAPITAL LETTER T
+0055; C; 0075; # LATIN CAPITAL LETTER U
+0056; C; 0076; # LATIN CAPITAL LETTER V
+0057; C; 0077; # LATIN CAPITAL LETTER W
+0058; C; 0078; # LATIN CAPITAL LETTER X
+0059; C; 0079; # LATIN CAPITAL LETTER Y
+005A; C; 007A; # LATIN CAPITAL LETTER Z
+00B5; C; 03BC; # MICRO SIGN
+00C0; C; 00E0; # LATIN CAPITAL LETTER A WITH GRAVE
+00C1; C; 00E1; # LATIN CAPITAL LETTER A WITH ACUTE
+00C2; C; 00E2; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+00C3; C; 00E3; # LATIN CAPITAL LETTER A WITH TILDE
+00C4; C; 00E4; # LATIN CAPITAL LETTER A WITH DIAERESIS
+00C5; C; 00E5; # LATIN CAPITAL LETTER A WITH RING ABOVE
+00C6; C; 00E6; # LATIN CAPITAL LETTER AE
+00C7; C; 00E7; # LATIN CAPITAL LETTER C WITH CEDILLA
+00C8; C; 00E8; # LATIN CAPITAL LETTER E WITH GRAVE
+00C9; C; 00E9; # LATIN CAPITAL LETTER E WITH ACUTE
+00CA; C; 00EA; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+00CB; C; 00EB; # LATIN CAPITAL LETTER E WITH DIAERESIS
+00CC; C; 00EC; # LATIN CAPITAL LETTER I WITH GRAVE
+00CD; C; 00ED; # LATIN CAPITAL LETTER I WITH ACUTE
+00CE; C; 00EE; # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+00CF; C; 00EF; # LATIN CAPITAL LETTER I WITH DIAERESIS
+00D0; C; 00F0; # LATIN CAPITAL LETTER ETH
+00D1; C; 00F1; # LATIN CAPITAL LETTER N WITH TILDE
+00D2; C; 00F2; # LATIN CAPITAL LETTER O WITH GRAVE
+00D3; C; 00F3; # LATIN CAPITAL LETTER O WITH ACUTE
+00D4; C; 00F4; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+00D5; C; 00F5; # LATIN CAPITAL LETTER O WITH TILDE
+00D6; C; 00F6; # LATIN CAPITAL LETTER O WITH DIAERESIS
+00D8; C; 00F8; # LATIN CAPITAL LETTER O WITH STROKE
+00D9; C; 00F9; # LATIN CAPITAL LETTER U WITH GRAVE
+00DA; C; 00FA; # LATIN CAPITAL LETTER U WITH ACUTE
+00DB; C; 00FB; # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+00DC; C; 00FC; # LATIN CAPITAL LETTER U WITH DIAERESIS
+00DD; C; 00FD; # LATIN CAPITAL LETTER Y WITH ACUTE
+00DE; C; 00FE; # LATIN CAPITAL LETTER THORN
+00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S
+0100; C; 0101; # LATIN CAPITAL LETTER A WITH MACRON
+0102; C; 0103; # LATIN CAPITAL LETTER A WITH BREVE
+0104; C; 0105; # LATIN CAPITAL LETTER A WITH OGONEK
+0106; C; 0107; # LATIN CAPITAL LETTER C WITH ACUTE
+0108; C; 0109; # LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+010A; C; 010B; # LATIN CAPITAL LETTER C WITH DOT ABOVE
+010C; C; 010D; # LATIN CAPITAL LETTER C WITH CARON
+010E; C; 010F; # LATIN CAPITAL LETTER D WITH CARON
+0110; C; 0111; # LATIN CAPITAL LETTER D WITH STROKE
+0112; C; 0113; # LATIN CAPITAL LETTER E WITH MACRON
+0114; C; 0115; # LATIN CAPITAL LETTER E WITH BREVE
+0116; C; 0117; # LATIN CAPITAL LETTER E WITH DOT ABOVE
+0118; C; 0119; # LATIN CAPITAL LETTER E WITH OGONEK
+011A; C; 011B; # LATIN CAPITAL LETTER E WITH CARON
+011C; C; 011D; # LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+011E; C; 011F; # LATIN CAPITAL LETTER G WITH BREVE
+0120; C; 0121; # LATIN CAPITAL LETTER G WITH DOT ABOVE
+0122; C; 0123; # LATIN CAPITAL LETTER G WITH CEDILLA
+0124; C; 0125; # LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+0126; C; 0127; # LATIN CAPITAL LETTER H WITH STROKE
+0128; C; 0129; # LATIN CAPITAL LETTER I WITH TILDE
+012A; C; 012B; # LATIN CAPITAL LETTER I WITH MACRON
+012C; C; 012D; # LATIN CAPITAL LETTER I WITH BREVE
+012E; C; 012F; # LATIN CAPITAL LETTER I WITH OGONEK
+0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+0130; T; 0069; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+0132; C; 0133; # LATIN CAPITAL LIGATURE IJ
+0134; C; 0135; # LATIN CAPITAL LETTER J WITH CIRCUMFLEX
+0136; C; 0137; # LATIN CAPITAL LETTER K WITH CEDILLA
+0139; C; 013A; # LATIN CAPITAL LETTER L WITH ACUTE
+013B; C; 013C; # LATIN CAPITAL LETTER L WITH CEDILLA
+013D; C; 013E; # LATIN CAPITAL LETTER L WITH CARON
+013F; C; 0140; # LATIN CAPITAL LETTER L WITH MIDDLE DOT
+0141; C; 0142; # LATIN CAPITAL LETTER L WITH STROKE
+0143; C; 0144; # LATIN CAPITAL LETTER N WITH ACUTE
+0145; C; 0146; # LATIN CAPITAL LETTER N WITH CEDILLA
+0147; C; 0148; # LATIN CAPITAL LETTER N WITH CARON
+0149; F; 02BC 006E; # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
+014A; C; 014B; # LATIN CAPITAL LETTER ENG
+014C; C; 014D; # LATIN CAPITAL LETTER O WITH MACRON
+014E; C; 014F; # LATIN CAPITAL LETTER O WITH BREVE
+0150; C; 0151; # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+0152; C; 0153; # LATIN CAPITAL LIGATURE OE
+0154; C; 0155; # LATIN CAPITAL LETTER R WITH ACUTE
+0156; C; 0157; # LATIN CAPITAL LETTER R WITH CEDILLA
+0158; C; 0159; # LATIN CAPITAL LETTER R WITH CARON
+015A; C; 015B; # LATIN CAPITAL LETTER S WITH ACUTE
+015C; C; 015D; # LATIN CAPITAL LETTER S WITH CIRCUMFLEX
+015E; C; 015F; # LATIN CAPITAL LETTER S WITH CEDILLA
+0160; C; 0161; # LATIN CAPITAL LETTER S WITH CARON
+0162; C; 0163; # LATIN CAPITAL LETTER T WITH CEDILLA
+0164; C; 0165; # LATIN CAPITAL LETTER T WITH CARON
+0166; C; 0167; # LATIN CAPITAL LETTER T WITH STROKE
+0168; C; 0169; # LATIN CAPITAL LETTER U WITH TILDE
+016A; C; 016B; # LATIN CAPITAL LETTER U WITH MACRON
+016C; C; 016D; # LATIN CAPITAL LETTER U WITH BREVE
+016E; C; 016F; # LATIN CAPITAL LETTER U WITH RING ABOVE
+0170; C; 0171; # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+0172; C; 0173; # LATIN CAPITAL LETTER U WITH OGONEK
+0174; C; 0175; # LATIN CAPITAL LETTER W WITH CIRCUMFLEX
+0176; C; 0177; # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
+0178; C; 00FF; # LATIN CAPITAL LETTER Y WITH DIAERESIS
+0179; C; 017A; # LATIN CAPITAL LETTER Z WITH ACUTE
+017B; C; 017C; # LATIN CAPITAL LETTER Z WITH DOT ABOVE
+017D; C; 017E; # LATIN CAPITAL LETTER Z WITH CARON
+017F; C; 0073; # LATIN SMALL LETTER LONG S
+0181; C; 0253; # LATIN CAPITAL LETTER B WITH HOOK
+0182; C; 0183; # LATIN CAPITAL LETTER B WITH TOPBAR
+0184; C; 0185; # LATIN CAPITAL LETTER TONE SIX
+0186; C; 0254; # LATIN CAPITAL LETTER OPEN O
+0187; C; 0188; # LATIN CAPITAL LETTER C WITH HOOK
+0189; C; 0256; # LATIN CAPITAL LETTER AFRICAN D
+018A; C; 0257; # LATIN CAPITAL LETTER D WITH HOOK
+018B; C; 018C; # LATIN CAPITAL LETTER D WITH TOPBAR
+018E; C; 01DD; # LATIN CAPITAL LETTER REVERSED E
+018F; C; 0259; # LATIN CAPITAL LETTER SCHWA
+0190; C; 025B; # LATIN CAPITAL LETTER OPEN E
+0191; C; 0192; # LATIN CAPITAL LETTER F WITH HOOK
+0193; C; 0260; # LATIN CAPITAL LETTER G WITH HOOK
+0194; C; 0263; # LATIN CAPITAL LETTER GAMMA
+0196; C; 0269; # LATIN CAPITAL LETTER IOTA
+0197; C; 0268; # LATIN CAPITAL LETTER I WITH STROKE
+0198; C; 0199; # LATIN CAPITAL LETTER K WITH HOOK
+019C; C; 026F; # LATIN CAPITAL LETTER TURNED M
+019D; C; 0272; # LATIN CAPITAL LETTER N WITH LEFT HOOK
+019F; C; 0275; # LATIN CAPITAL LETTER O WITH MIDDLE TILDE
+01A0; C; 01A1; # LATIN CAPITAL LETTER O WITH HORN
+01A2; C; 01A3; # LATIN CAPITAL LETTER OI
+01A4; C; 01A5; # LATIN CAPITAL LETTER P WITH HOOK
+01A6; C; 0280; # LATIN LETTER YR
+01A7; C; 01A8; # LATIN CAPITAL LETTER TONE TWO
+01A9; C; 0283; # LATIN CAPITAL LETTER ESH
+01AC; C; 01AD; # LATIN CAPITAL LETTER T WITH HOOK
+01AE; C; 0288; # LATIN CAPITAL LETTER T WITH RETROFLEX HOOK
+01AF; C; 01B0; # LATIN CAPITAL LETTER U WITH HORN
+01B1; C; 028A; # LATIN CAPITAL LETTER UPSILON
+01B2; C; 028B; # LATIN CAPITAL LETTER V WITH HOOK
+01B3; C; 01B4; # LATIN CAPITAL LETTER Y WITH HOOK
+01B5; C; 01B6; # LATIN CAPITAL LETTER Z WITH STROKE
+01B7; C; 0292; # LATIN CAPITAL LETTER EZH
+01B8; C; 01B9; # LATIN CAPITAL LETTER EZH REVERSED
+01BC; C; 01BD; # LATIN CAPITAL LETTER TONE FIVE
+01C4; C; 01C6; # LATIN CAPITAL LETTER DZ WITH CARON
+01C5; C; 01C6; # LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON
+01C7; C; 01C9; # LATIN CAPITAL LETTER LJ
+01C8; C; 01C9; # LATIN CAPITAL LETTER L WITH SMALL LETTER J
+01CA; C; 01CC; # LATIN CAPITAL LETTER NJ
+01CB; C; 01CC; # LATIN CAPITAL LETTER N WITH SMALL LETTER J
+01CD; C; 01CE; # LATIN CAPITAL LETTER A WITH CARON
+01CF; C; 01D0; # LATIN CAPITAL LETTER I WITH CARON
+01D1; C; 01D2; # LATIN CAPITAL LETTER O WITH CARON
+01D3; C; 01D4; # LATIN CAPITAL LETTER U WITH CARON
+01D5; C; 01D6; # LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON
+01D7; C; 01D8; # LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE
+01D9; C; 01DA; # LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON
+01DB; C; 01DC; # LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE
+01DE; C; 01DF; # LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON
+01E0; C; 01E1; # LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON
+01E2; C; 01E3; # LATIN CAPITAL LETTER AE WITH MACRON
+01E4; C; 01E5; # LATIN CAPITAL LETTER G WITH STROKE
+01E6; C; 01E7; # LATIN CAPITAL LETTER G WITH CARON
+01E8; C; 01E9; # LATIN CAPITAL LETTER K WITH CARON
+01EA; C; 01EB; # LATIN CAPITAL LETTER O WITH OGONEK
+01EC; C; 01ED; # LATIN CAPITAL LETTER O WITH OGONEK AND MACRON
+01EE; C; 01EF; # LATIN CAPITAL LETTER EZH WITH CARON
+01F0; F; 006A 030C; # LATIN SMALL LETTER J WITH CARON
+01F1; C; 01F3; # LATIN CAPITAL LETTER DZ
+01F2; C; 01F3; # LATIN CAPITAL LETTER D WITH SMALL LETTER Z
+01F4; C; 01F5; # LATIN CAPITAL LETTER G WITH ACUTE
+01F6; C; 0195; # LATIN CAPITAL LETTER HWAIR
+01F7; C; 01BF; # LATIN CAPITAL LETTER WYNN
+01F8; C; 01F9; # LATIN CAPITAL LETTER N WITH GRAVE
+01FA; C; 01FB; # LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
+01FC; C; 01FD; # LATIN CAPITAL LETTER AE WITH ACUTE
+01FE; C; 01FF; # LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
+0200; C; 0201; # LATIN CAPITAL LETTER A WITH DOUBLE GRAVE
+0202; C; 0203; # LATIN CAPITAL LETTER A WITH INVERTED BREVE
+0204; C; 0205; # LATIN CAPITAL LETTER E WITH DOUBLE GRAVE
+0206; C; 0207; # LATIN CAPITAL LETTER E WITH INVERTED BREVE
+0208; C; 0209; # LATIN CAPITAL LETTER I WITH DOUBLE GRAVE
+020A; C; 020B; # LATIN CAPITAL LETTER I WITH INVERTED BREVE
+020C; C; 020D; # LATIN CAPITAL LETTER O WITH DOUBLE GRAVE
+020E; C; 020F; # LATIN CAPITAL LETTER O WITH INVERTED BREVE
+0210; C; 0211; # LATIN CAPITAL LETTER R WITH DOUBLE GRAVE
+0212; C; 0213; # LATIN CAPITAL LETTER R WITH INVERTED BREVE
+0214; C; 0215; # LATIN CAPITAL LETTER U WITH DOUBLE GRAVE
+0216; C; 0217; # LATIN CAPITAL LETTER U WITH INVERTED BREVE
+0218; C; 0219; # LATIN CAPITAL LETTER S WITH COMMA BELOW
+021A; C; 021B; # LATIN CAPITAL LETTER T WITH COMMA BELOW
+021C; C; 021D; # LATIN CAPITAL LETTER YOGH
+021E; C; 021F; # LATIN CAPITAL LETTER H WITH CARON
+0220; C; 019E; # LATIN CAPITAL LETTER N WITH LONG RIGHT LEG
+0222; C; 0223; # LATIN CAPITAL LETTER OU
+0224; C; 0225; # LATIN CAPITAL LETTER Z WITH HOOK
+0226; C; 0227; # LATIN CAPITAL LETTER A WITH DOT ABOVE
+0228; C; 0229; # LATIN CAPITAL LETTER E WITH CEDILLA
+022A; C; 022B; # LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON
+022C; C; 022D; # LATIN CAPITAL LETTER O WITH TILDE AND MACRON
+022E; C; 022F; # LATIN CAPITAL LETTER O WITH DOT ABOVE
+0230; C; 0231; # LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON
+0232; C; 0233; # LATIN CAPITAL LETTER Y WITH MACRON
+023A; C; 2C65; # LATIN CAPITAL LETTER A WITH STROKE
+023B; C; 023C; # LATIN CAPITAL LETTER C WITH STROKE
+023D; C; 019A; # LATIN CAPITAL LETTER L WITH BAR
+023E; C; 2C66; # LATIN CAPITAL LETTER T WITH DIAGONAL STROKE
+0241; C; 0242; # LATIN CAPITAL LETTER GLOTTAL STOP
+0243; C; 0180; # LATIN CAPITAL LETTER B WITH STROKE
+0244; C; 0289; # LATIN CAPITAL LETTER U BAR
+0245; C; 028C; # LATIN CAPITAL LETTER TURNED V
+0246; C; 0247; # LATIN CAPITAL LETTER E WITH STROKE
+0248; C; 0249; # LATIN CAPITAL LETTER J WITH STROKE
+024A; C; 024B; # LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL
+024C; C; 024D; # LATIN CAPITAL LETTER R WITH STROKE
+024E; C; 024F; # LATIN CAPITAL LETTER Y WITH STROKE
+0345; C; 03B9; # COMBINING GREEK YPOGEGRAMMENI
+0370; C; 0371; # GREEK CAPITAL LETTER HETA
+0372; C; 0373; # GREEK CAPITAL LETTER ARCHAIC SAMPI
+0376; C; 0377; # GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA
+0386; C; 03AC; # GREEK CAPITAL LETTER ALPHA WITH TONOS
+0388; C; 03AD; # GREEK CAPITAL LETTER EPSILON WITH TONOS
+0389; C; 03AE; # GREEK CAPITAL LETTER ETA WITH TONOS
+038A; C; 03AF; # GREEK CAPITAL LETTER IOTA WITH TONOS
+038C; C; 03CC; # GREEK CAPITAL LETTER OMICRON WITH TONOS
+038E; C; 03CD; # GREEK CAPITAL LETTER UPSILON WITH TONOS
+038F; C; 03CE; # GREEK CAPITAL LETTER OMEGA WITH TONOS
+0390; F; 03B9 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+0391; C; 03B1; # GREEK CAPITAL LETTER ALPHA
+0392; C; 03B2; # GREEK CAPITAL LETTER BETA
+0393; C; 03B3; # GREEK CAPITAL LETTER GAMMA
+0394; C; 03B4; # GREEK CAPITAL LETTER DELTA
+0395; C; 03B5; # GREEK CAPITAL LETTER EPSILON
+0396; C; 03B6; # GREEK CAPITAL LETTER ZETA
+0397; C; 03B7; # GREEK CAPITAL LETTER ETA
+0398; C; 03B8; # GREEK CAPITAL LETTER THETA
+0399; C; 03B9; # GREEK CAPITAL LETTER IOTA
+039A; C; 03BA; # GREEK CAPITAL LETTER KAPPA
+039B; C; 03BB; # GREEK CAPITAL LETTER LAMDA
+039C; C; 03BC; # GREEK CAPITAL LETTER MU
+039D; C; 03BD; # GREEK CAPITAL LETTER NU
+039E; C; 03BE; # GREEK CAPITAL LETTER XI
+039F; C; 03BF; # GREEK CAPITAL LETTER OMICRON
+03A0; C; 03C0; # GREEK CAPITAL LETTER PI
+03A1; C; 03C1; # GREEK CAPITAL LETTER RHO
+03A3; C; 03C3; # GREEK CAPITAL LETTER SIGMA
+03A4; C; 03C4; # GREEK CAPITAL LETTER TAU
+03A5; C; 03C5; # GREEK CAPITAL LETTER UPSILON
+03A6; C; 03C6; # GREEK CAPITAL LETTER PHI
+03A7; C; 03C7; # GREEK CAPITAL LETTER CHI
+03A8; C; 03C8; # GREEK CAPITAL LETTER PSI
+03A9; C; 03C9; # GREEK CAPITAL LETTER OMEGA
+03AA; C; 03CA; # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+03AB; C; 03CB; # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+03B0; F; 03C5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+03C2; C; 03C3; # GREEK SMALL LETTER FINAL SIGMA
+03CF; C; 03D7; # GREEK CAPITAL KAI SYMBOL
+03D0; C; 03B2; # GREEK BETA SYMBOL
+03D1; C; 03B8; # GREEK THETA SYMBOL
+03D5; C; 03C6; # GREEK PHI SYMBOL
+03D6; C; 03C0; # GREEK PI SYMBOL
+03D8; C; 03D9; # GREEK LETTER ARCHAIC KOPPA
+03DA; C; 03DB; # GREEK LETTER STIGMA
+03DC; C; 03DD; # GREEK LETTER DIGAMMA
+03DE; C; 03DF; # GREEK LETTER KOPPA
+03E0; C; 03E1; # GREEK LETTER SAMPI
+03E2; C; 03E3; # COPTIC CAPITAL LETTER SHEI
+03E4; C; 03E5; # COPTIC CAPITAL LETTER FEI
+03E6; C; 03E7; # COPTIC CAPITAL LETTER KHEI
+03E8; C; 03E9; # COPTIC CAPITAL LETTER HORI
+03EA; C; 03EB; # COPTIC CAPITAL LETTER GANGIA
+03EC; C; 03ED; # COPTIC CAPITAL LETTER SHIMA
+03EE; C; 03EF; # COPTIC CAPITAL LETTER DEI
+03F0; C; 03BA; # GREEK KAPPA SYMBOL
+03F1; C; 03C1; # GREEK RHO SYMBOL
+03F4; C; 03B8; # GREEK CAPITAL THETA SYMBOL
+03F5; C; 03B5; # GREEK LUNATE EPSILON SYMBOL
+03F7; C; 03F8; # GREEK CAPITAL LETTER SHO
+03F9; C; 03F2; # GREEK CAPITAL LUNATE SIGMA SYMBOL
+03FA; C; 03FB; # GREEK CAPITAL LETTER SAN
+03FD; C; 037B; # GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL
+03FE; C; 037C; # GREEK CAPITAL DOTTED LUNATE SIGMA SYMBOL
+03FF; C; 037D; # GREEK CAPITAL REVERSED DOTTED LUNATE SIGMA SYMBOL
+0400; C; 0450; # CYRILLIC CAPITAL LETTER IE WITH GRAVE
+0401; C; 0451; # CYRILLIC CAPITAL LETTER IO
+0402; C; 0452; # CYRILLIC CAPITAL LETTER DJE
+0403; C; 0453; # CYRILLIC CAPITAL LETTER GJE
+0404; C; 0454; # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+0405; C; 0455; # CYRILLIC CAPITAL LETTER DZE
+0406; C; 0456; # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+0407; C; 0457; # CYRILLIC CAPITAL LETTER YI
+0408; C; 0458; # CYRILLIC CAPITAL LETTER JE
+0409; C; 0459; # CYRILLIC CAPITAL LETTER LJE
+040A; C; 045A; # CYRILLIC CAPITAL LETTER NJE
+040B; C; 045B; # CYRILLIC CAPITAL LETTER TSHE
+040C; C; 045C; # CYRILLIC CAPITAL LETTER KJE
+040D; C; 045D; # CYRILLIC CAPITAL LETTER I WITH GRAVE
+040E; C; 045E; # CYRILLIC CAPITAL LETTER SHORT U
+040F; C; 045F; # CYRILLIC CAPITAL LETTER DZHE
+0410; C; 0430; # CYRILLIC CAPITAL LETTER A
+0411; C; 0431; # CYRILLIC CAPITAL LETTER BE
+0412; C; 0432; # CYRILLIC CAPITAL LETTER VE
+0413; C; 0433; # CYRILLIC CAPITAL LETTER GHE
+0414; C; 0434; # CYRILLIC CAPITAL LETTER DE
+0415; C; 0435; # CYRILLIC CAPITAL LETTER IE
+0416; C; 0436; # CYRILLIC CAPITAL LETTER ZHE
+0417; C; 0437; # CYRILLIC CAPITAL LETTER ZE
+0418; C; 0438; # CYRILLIC CAPITAL LETTER I
+0419; C; 0439; # CYRILLIC CAPITAL LETTER SHORT I
+041A; C; 043A; # CYRILLIC CAPITAL LETTER KA
+041B; C; 043B; # CYRILLIC CAPITAL LETTER EL
+041C; C; 043C; # CYRILLIC CAPITAL LETTER EM
+041D; C; 043D; # CYRILLIC CAPITAL LETTER EN
+041E; C; 043E; # CYRILLIC CAPITAL LETTER O
+041F; C; 043F; # CYRILLIC CAPITAL LETTER PE
+0420; C; 0440; # CYRILLIC CAPITAL LETTER ER
+0421; C; 0441; # CYRILLIC CAPITAL LETTER ES
+0422; C; 0442; # CYRILLIC CAPITAL LETTER TE
+0423; C; 0443; # CYRILLIC CAPITAL LETTER U
+0424; C; 0444; # CYRILLIC CAPITAL LETTER EF
+0425; C; 0445; # CYRILLIC CAPITAL LETTER HA
+0426; C; 0446; # CYRILLIC CAPITAL LETTER TSE
+0427; C; 0447; # CYRILLIC CAPITAL LETTER CHE
+0428; C; 0448; # CYRILLIC CAPITAL LETTER SHA
+0429; C; 0449; # CYRILLIC CAPITAL LETTER SHCHA
+042A; C; 044A; # CYRILLIC CAPITAL LETTER HARD SIGN
+042B; C; 044B; # CYRILLIC CAPITAL LETTER YERU
+042C; C; 044C; # CYRILLIC CAPITAL LETTER SOFT SIGN
+042D; C; 044D; # CYRILLIC CAPITAL LETTER E
+042E; C; 044E; # CYRILLIC CAPITAL LETTER YU
+042F; C; 044F; # CYRILLIC CAPITAL LETTER YA
+0460; C; 0461; # CYRILLIC CAPITAL LETTER OMEGA
+0462; C; 0463; # CYRILLIC CAPITAL LETTER YAT
+0464; C; 0465; # CYRILLIC CAPITAL LETTER IOTIFIED E
+0466; C; 0467; # CYRILLIC CAPITAL LETTER LITTLE YUS
+0468; C; 0469; # CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS
+046A; C; 046B; # CYRILLIC CAPITAL LETTER BIG YUS
+046C; C; 046D; # CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS
+046E; C; 046F; # CYRILLIC CAPITAL LETTER KSI
+0470; C; 0471; # CYRILLIC CAPITAL LETTER PSI
+0472; C; 0473; # CYRILLIC CAPITAL LETTER FITA
+0474; C; 0475; # CYRILLIC CAPITAL LETTER IZHITSA
+0476; C; 0477; # CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT
+0478; C; 0479; # CYRILLIC CAPITAL LETTER UK
+047A; C; 047B; # CYRILLIC CAPITAL LETTER ROUND OMEGA
+047C; C; 047D; # CYRILLIC CAPITAL LETTER OMEGA WITH TITLO
+047E; C; 047F; # CYRILLIC CAPITAL LETTER OT
+0480; C; 0481; # CYRILLIC CAPITAL LETTER KOPPA
+048A; C; 048B; # CYRILLIC CAPITAL LETTER SHORT I WITH TAIL
+048C; C; 048D; # CYRILLIC CAPITAL LETTER SEMISOFT SIGN
+048E; C; 048F; # CYRILLIC CAPITAL LETTER ER WITH TICK
+0490; C; 0491; # CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+0492; C; 0493; # CYRILLIC CAPITAL LETTER GHE WITH STROKE
+0494; C; 0495; # CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK
+0496; C; 0497; # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
+0498; C; 0499; # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
+049A; C; 049B; # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
+049C; C; 049D; # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
+049E; C; 049F; # CYRILLIC CAPITAL LETTER KA WITH STROKE
+04A0; C; 04A1; # CYRILLIC CAPITAL LETTER BASHKIR KA
+04A2; C; 04A3; # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
+04A4; C; 04A5; # CYRILLIC CAPITAL LIGATURE EN GHE
+04A6; C; 04A7; # CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK
+04A8; C; 04A9; # CYRILLIC CAPITAL LETTER ABKHASIAN HA
+04AA; C; 04AB; # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
+04AC; C; 04AD; # CYRILLIC CAPITAL LETTER TE WITH DESCENDER
+04AE; C; 04AF; # CYRILLIC CAPITAL LETTER STRAIGHT U
+04B0; C; 04B1; # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
+04B2; C; 04B3; # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
+04B4; C; 04B5; # CYRILLIC CAPITAL LIGATURE TE TSE
+04B6; C; 04B7; # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
+04B8; C; 04B9; # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
+04BA; C; 04BB; # CYRILLIC CAPITAL LETTER SHHA
+04BC; C; 04BD; # CYRILLIC CAPITAL LETTER ABKHASIAN CHE
+04BE; C; 04BF; # CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH DESCENDER
+04C0; C; 04CF; # CYRILLIC LETTER PALOCHKA
+04C1; C; 04C2; # CYRILLIC CAPITAL LETTER ZHE WITH BREVE
+04C3; C; 04C4; # CYRILLIC CAPITAL LETTER KA WITH HOOK
+04C5; C; 04C6; # CYRILLIC CAPITAL LETTER EL WITH TAIL
+04C7; C; 04C8; # CYRILLIC CAPITAL LETTER EN WITH HOOK
+04C9; C; 04CA; # CYRILLIC CAPITAL LETTER EN WITH TAIL
+04CB; C; 04CC; # CYRILLIC CAPITAL LETTER KHAKASSIAN CHE
+04CD; C; 04CE; # CYRILLIC CAPITAL LETTER EM WITH TAIL
+04D0; C; 04D1; # CYRILLIC CAPITAL LETTER A WITH BREVE
+04D2; C; 04D3; # CYRILLIC CAPITAL LETTER A WITH DIAERESIS
+04D4; C; 04D5; # CYRILLIC CAPITAL LIGATURE A IE
+04D6; C; 04D7; # CYRILLIC CAPITAL LETTER IE WITH BREVE
+04D8; C; 04D9; # CYRILLIC CAPITAL LETTER SCHWA
+04DA; C; 04DB; # CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS
+04DC; C; 04DD; # CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS
+04DE; C; 04DF; # CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS
+04E0; C; 04E1; # CYRILLIC CAPITAL LETTER ABKHASIAN DZE
+04E2; C; 04E3; # CYRILLIC CAPITAL LETTER I WITH MACRON
+04E4; C; 04E5; # CYRILLIC CAPITAL LETTER I WITH DIAERESIS
+04E6; C; 04E7; # CYRILLIC CAPITAL LETTER O WITH DIAERESIS
+04E8; C; 04E9; # CYRILLIC CAPITAL LETTER BARRED O
+04EA; C; 04EB; # CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS
+04EC; C; 04ED; # CYRILLIC CAPITAL LETTER E WITH DIAERESIS
+04EE; C; 04EF; # CYRILLIC CAPITAL LETTER U WITH MACRON
+04F0; C; 04F1; # CYRILLIC CAPITAL LETTER U WITH DIAERESIS
+04F2; C; 04F3; # CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE
+04F4; C; 04F5; # CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS
+04F6; C; 04F7; # CYRILLIC CAPITAL LETTER GHE WITH DESCENDER
+04F8; C; 04F9; # CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS
+04FA; C; 04FB; # CYRILLIC CAPITAL LETTER GHE WITH STROKE AND HOOK
+04FC; C; 04FD; # CYRILLIC CAPITAL LETTER HA WITH HOOK
+04FE; C; 04FF; # CYRILLIC CAPITAL LETTER HA WITH STROKE
+0500; C; 0501; # CYRILLIC CAPITAL LETTER KOMI DE
+0502; C; 0503; # CYRILLIC CAPITAL LETTER KOMI DJE
+0504; C; 0505; # CYRILLIC CAPITAL LETTER KOMI ZJE
+0506; C; 0507; # CYRILLIC CAPITAL LETTER KOMI DZJE
+0508; C; 0509; # CYRILLIC CAPITAL LETTER KOMI LJE
+050A; C; 050B; # CYRILLIC CAPITAL LETTER KOMI NJE
+050C; C; 050D; # CYRILLIC CAPITAL LETTER KOMI SJE
+050E; C; 050F; # CYRILLIC CAPITAL LETTER KOMI TJE
+0510; C; 0511; # CYRILLIC CAPITAL LETTER REVERSED ZE
+0512; C; 0513; # CYRILLIC CAPITAL LETTER EL WITH HOOK
+0514; C; 0515; # CYRILLIC CAPITAL LETTER LHA
+0516; C; 0517; # CYRILLIC CAPITAL LETTER RHA
+0518; C; 0519; # CYRILLIC CAPITAL LETTER YAE
+051A; C; 051B; # CYRILLIC CAPITAL LETTER QA
+051C; C; 051D; # CYRILLIC CAPITAL LETTER WE
+051E; C; 051F; # CYRILLIC CAPITAL LETTER ALEUT KA
+0520; C; 0521; # CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK
+0522; C; 0523; # CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK
+0524; C; 0525; # CYRILLIC CAPITAL LETTER PE WITH DESCENDER
+0531; C; 0561; # ARMENIAN CAPITAL LETTER AYB
+0532; C; 0562; # ARMENIAN CAPITAL LETTER BEN
+0533; C; 0563; # ARMENIAN CAPITAL LETTER GIM
+0534; C; 0564; # ARMENIAN CAPITAL LETTER DA
+0535; C; 0565; # ARMENIAN CAPITAL LETTER ECH
+0536; C; 0566; # ARMENIAN CAPITAL LETTER ZA
+0537; C; 0567; # ARMENIAN CAPITAL LETTER EH
+0538; C; 0568; # ARMENIAN CAPITAL LETTER ET
+0539; C; 0569; # ARMENIAN CAPITAL LETTER TO
+053A; C; 056A; # ARMENIAN CAPITAL LETTER ZHE
+053B; C; 056B; # ARMENIAN CAPITAL LETTER INI
+053C; C; 056C; # ARMENIAN CAPITAL LETTER LIWN
+053D; C; 056D; # ARMENIAN CAPITAL LETTER XEH
+053E; C; 056E; # ARMENIAN CAPITAL LETTER CA
+053F; C; 056F; # ARMENIAN CAPITAL LETTER KEN
+0540; C; 0570; # ARMENIAN CAPITAL LETTER HO
+0541; C; 0571; # ARMENIAN CAPITAL LETTER JA
+0542; C; 0572; # ARMENIAN CAPITAL LETTER GHAD
+0543; C; 0573; # ARMENIAN CAPITAL LETTER CHEH
+0544; C; 0574; # ARMENIAN CAPITAL LETTER MEN
+0545; C; 0575; # ARMENIAN CAPITAL LETTER YI
+0546; C; 0576; # ARMENIAN CAPITAL LETTER NOW
+0547; C; 0577; # ARMENIAN CAPITAL LETTER SHA
+0548; C; 0578; # ARMENIAN CAPITAL LETTER VO
+0549; C; 0579; # ARMENIAN CAPITAL LETTER CHA
+054A; C; 057A; # ARMENIAN CAPITAL LETTER PEH
+054B; C; 057B; # ARMENIAN CAPITAL LETTER JHEH
+054C; C; 057C; # ARMENIAN CAPITAL LETTER RA
+054D; C; 057D; # ARMENIAN CAPITAL LETTER SEH
+054E; C; 057E; # ARMENIAN CAPITAL LETTER VEW
+054F; C; 057F; # ARMENIAN CAPITAL LETTER TIWN
+0550; C; 0580; # ARMENIAN CAPITAL LETTER REH
+0551; C; 0581; # ARMENIAN CAPITAL LETTER CO
+0552; C; 0582; # ARMENIAN CAPITAL LETTER YIWN
+0553; C; 0583; # ARMENIAN CAPITAL LETTER PIWR
+0554; C; 0584; # ARMENIAN CAPITAL LETTER KEH
+0555; C; 0585; # ARMENIAN CAPITAL LETTER OH
+0556; C; 0586; # ARMENIAN CAPITAL LETTER FEH
+0587; F; 0565 0582; # ARMENIAN SMALL LIGATURE ECH YIWN
+10A0; C; 2D00; # GEORGIAN CAPITAL LETTER AN
+10A1; C; 2D01; # GEORGIAN CAPITAL LETTER BAN
+10A2; C; 2D02; # GEORGIAN CAPITAL LETTER GAN
+10A3; C; 2D03; # GEORGIAN CAPITAL LETTER DON
+10A4; C; 2D04; # GEORGIAN CAPITAL LETTER EN
+10A5; C; 2D05; # GEORGIAN CAPITAL LETTER VIN
+10A6; C; 2D06; # GEORGIAN CAPITAL LETTER ZEN
+10A7; C; 2D07; # GEORGIAN CAPITAL LETTER TAN
+10A8; C; 2D08; # GEORGIAN CAPITAL LETTER IN
+10A9; C; 2D09; # GEORGIAN CAPITAL LETTER KAN
+10AA; C; 2D0A; # GEORGIAN CAPITAL LETTER LAS
+10AB; C; 2D0B; # GEORGIAN CAPITAL LETTER MAN
+10AC; C; 2D0C; # GEORGIAN CAPITAL LETTER NAR
+10AD; C; 2D0D; # GEORGIAN CAPITAL LETTER ON
+10AE; C; 2D0E; # GEORGIAN CAPITAL LETTER PAR
+10AF; C; 2D0F; # GEORGIAN CAPITAL LETTER ZHAR
+10B0; C; 2D10; # GEORGIAN CAPITAL LETTER RAE
+10B1; C; 2D11; # GEORGIAN CAPITAL LETTER SAN
+10B2; C; 2D12; # GEORGIAN CAPITAL LETTER TAR
+10B3; C; 2D13; # GEORGIAN CAPITAL LETTER UN
+10B4; C; 2D14; # GEORGIAN CAPITAL LETTER PHAR
+10B5; C; 2D15; # GEORGIAN CAPITAL LETTER KHAR
+10B6; C; 2D16; # GEORGIAN CAPITAL LETTER GHAN
+10B7; C; 2D17; # GEORGIAN CAPITAL LETTER QAR
+10B8; C; 2D18; # GEORGIAN CAPITAL LETTER SHIN
+10B9; C; 2D19; # GEORGIAN CAPITAL LETTER CHIN
+10BA; C; 2D1A; # GEORGIAN CAPITAL LETTER CAN
+10BB; C; 2D1B; # GEORGIAN CAPITAL LETTER JIL
+10BC; C; 2D1C; # GEORGIAN CAPITAL LETTER CIL
+10BD; C; 2D1D; # GEORGIAN CAPITAL LETTER CHAR
+10BE; C; 2D1E; # GEORGIAN CAPITAL LETTER XAN
+10BF; C; 2D1F; # GEORGIAN CAPITAL LETTER JHAN
+10C0; C; 2D20; # GEORGIAN CAPITAL LETTER HAE
+10C1; C; 2D21; # GEORGIAN CAPITAL LETTER HE
+10C2; C; 2D22; # GEORGIAN CAPITAL LETTER HIE
+10C3; C; 2D23; # GEORGIAN CAPITAL LETTER WE
+10C4; C; 2D24; # GEORGIAN CAPITAL LETTER HAR
+10C5; C; 2D25; # GEORGIAN CAPITAL LETTER HOE
+1E00; C; 1E01; # LATIN CAPITAL LETTER A WITH RING BELOW
+1E02; C; 1E03; # LATIN CAPITAL LETTER B WITH DOT ABOVE
+1E04; C; 1E05; # LATIN CAPITAL LETTER B WITH DOT BELOW
+1E06; C; 1E07; # LATIN CAPITAL LETTER B WITH LINE BELOW
+1E08; C; 1E09; # LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE
+1E0A; C; 1E0B; # LATIN CAPITAL LETTER D WITH DOT ABOVE
+1E0C; C; 1E0D; # LATIN CAPITAL LETTER D WITH DOT BELOW
+1E0E; C; 1E0F; # LATIN CAPITAL LETTER D WITH LINE BELOW
+1E10; C; 1E11; # LATIN CAPITAL LETTER D WITH CEDILLA
+1E12; C; 1E13; # LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
+1E14; C; 1E15; # LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
+1E16; C; 1E17; # LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
+1E18; C; 1E19; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
+1E1A; C; 1E1B; # LATIN CAPITAL LETTER E WITH TILDE BELOW
+1E1C; C; 1E1D; # LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE
+1E1E; C; 1E1F; # LATIN CAPITAL LETTER F WITH DOT ABOVE
+1E20; C; 1E21; # LATIN CAPITAL LETTER G WITH MACRON
+1E22; C; 1E23; # LATIN CAPITAL LETTER H WITH DOT ABOVE
+1E24; C; 1E25; # LATIN CAPITAL LETTER H WITH DOT BELOW
+1E26; C; 1E27; # LATIN CAPITAL LETTER H WITH DIAERESIS
+1E28; C; 1E29; # LATIN CAPITAL LETTER H WITH CEDILLA
+1E2A; C; 1E2B; # LATIN CAPITAL LETTER H WITH BREVE BELOW
+1E2C; C; 1E2D; # LATIN CAPITAL LETTER I WITH TILDE BELOW
+1E2E; C; 1E2F; # LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE
+1E30; C; 1E31; # LATIN CAPITAL LETTER K WITH ACUTE
+1E32; C; 1E33; # LATIN CAPITAL LETTER K WITH DOT BELOW
+1E34; C; 1E35; # LATIN CAPITAL LETTER K WITH LINE BELOW
+1E36; C; 1E37; # LATIN CAPITAL LETTER L WITH DOT BELOW
+1E38; C; 1E39; # LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON
+1E3A; C; 1E3B; # LATIN CAPITAL LETTER L WITH LINE BELOW
+1E3C; C; 1E3D; # LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
+1E3E; C; 1E3F; # LATIN CAPITAL LETTER M WITH ACUTE
+1E40; C; 1E41; # LATIN CAPITAL LETTER M WITH DOT ABOVE
+1E42; C; 1E43; # LATIN CAPITAL LETTER M WITH DOT BELOW
+1E44; C; 1E45; # LATIN CAPITAL LETTER N WITH DOT ABOVE
+1E46; C; 1E47; # LATIN CAPITAL LETTER N WITH DOT BELOW
+1E48; C; 1E49; # LATIN CAPITAL LETTER N WITH LINE BELOW
+1E4A; C; 1E4B; # LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
+1E4C; C; 1E4D; # LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
+1E4E; C; 1E4F; # LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS
+1E50; C; 1E51; # LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
+1E52; C; 1E53; # LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
+1E54; C; 1E55; # LATIN CAPITAL LETTER P WITH ACUTE
+1E56; C; 1E57; # LATIN CAPITAL LETTER P WITH DOT ABOVE
+1E58; C; 1E59; # LATIN CAPITAL LETTER R WITH DOT ABOVE
+1E5A; C; 1E5B; # LATIN CAPITAL LETTER R WITH DOT BELOW
+1E5C; C; 1E5D; # LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON
+1E5E; C; 1E5F; # LATIN CAPITAL LETTER R WITH LINE BELOW
+1E60; C; 1E61; # LATIN CAPITAL LETTER S WITH DOT ABOVE
+1E62; C; 1E63; # LATIN CAPITAL LETTER S WITH DOT BELOW
+1E64; C; 1E65; # LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE
+1E66; C; 1E67; # LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE
+1E68; C; 1E69; # LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE
+1E6A; C; 1E6B; # LATIN CAPITAL LETTER T WITH DOT ABOVE
+1E6C; C; 1E6D; # LATIN CAPITAL LETTER T WITH DOT BELOW
+1E6E; C; 1E6F; # LATIN CAPITAL LETTER T WITH LINE BELOW
+1E70; C; 1E71; # LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
+1E72; C; 1E73; # LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
+1E74; C; 1E75; # LATIN CAPITAL LETTER U WITH TILDE BELOW
+1E76; C; 1E77; # LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
+1E78; C; 1E79; # LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
+1E7A; C; 1E7B; # LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS
+1E7C; C; 1E7D; # LATIN CAPITAL LETTER V WITH TILDE
+1E7E; C; 1E7F; # LATIN CAPITAL LETTER V WITH DOT BELOW
+1E80; C; 1E81; # LATIN CAPITAL LETTER W WITH GRAVE
+1E82; C; 1E83; # LATIN CAPITAL LETTER W WITH ACUTE
+1E84; C; 1E85; # LATIN CAPITAL LETTER W WITH DIAERESIS
+1E86; C; 1E87; # LATIN CAPITAL LETTER W WITH DOT ABOVE
+1E88; C; 1E89; # LATIN CAPITAL LETTER W WITH DOT BELOW
+1E8A; C; 1E8B; # LATIN CAPITAL LETTER X WITH DOT ABOVE
+1E8C; C; 1E8D; # LATIN CAPITAL LETTER X WITH DIAERESIS
+1E8E; C; 1E8F; # LATIN CAPITAL LETTER Y WITH DOT ABOVE
+1E90; C; 1E91; # LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
+1E92; C; 1E93; # LATIN CAPITAL LETTER Z WITH DOT BELOW
+1E94; C; 1E95; # LATIN CAPITAL LETTER Z WITH LINE BELOW
+1E96; F; 0068 0331; # LATIN SMALL LETTER H WITH LINE BELOW
+1E97; F; 0074 0308; # LATIN SMALL LETTER T WITH DIAERESIS
+1E98; F; 0077 030A; # LATIN SMALL LETTER W WITH RING ABOVE
+1E99; F; 0079 030A; # LATIN SMALL LETTER Y WITH RING ABOVE
+1E9A; F; 0061 02BE; # LATIN SMALL LETTER A WITH RIGHT HALF RING
+1E9B; C; 1E61; # LATIN SMALL LETTER LONG S WITH DOT ABOVE
+1E9E; F; 0073 0073; # LATIN CAPITAL LETTER SHARP S
+1E9E; S; 00DF; # LATIN CAPITAL LETTER SHARP S
+1EA0; C; 1EA1; # LATIN CAPITAL LETTER A WITH DOT BELOW
+1EA2; C; 1EA3; # LATIN CAPITAL LETTER A WITH HOOK ABOVE
+1EA4; C; 1EA5; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE
+1EA6; C; 1EA7; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE
+1EA8; C; 1EA9; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
+1EAA; C; 1EAB; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE
+1EAC; C; 1EAD; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW
+1EAE; C; 1EAF; # LATIN CAPITAL LETTER A WITH BREVE AND ACUTE
+1EB0; C; 1EB1; # LATIN CAPITAL LETTER A WITH BREVE AND GRAVE
+1EB2; C; 1EB3; # LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE
+1EB4; C; 1EB5; # LATIN CAPITAL LETTER A WITH BREVE AND TILDE
+1EB6; C; 1EB7; # LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW
+1EB8; C; 1EB9; # LATIN CAPITAL LETTER E WITH DOT BELOW
+1EBA; C; 1EBB; # LATIN CAPITAL LETTER E WITH HOOK ABOVE
+1EBC; C; 1EBD; # LATIN CAPITAL LETTER E WITH TILDE
+1EBE; C; 1EBF; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE
+1EC0; C; 1EC1; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE
+1EC2; C; 1EC3; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
+1EC4; C; 1EC5; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE
+1EC6; C; 1EC7; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW
+1EC8; C; 1EC9; # LATIN CAPITAL LETTER I WITH HOOK ABOVE
+1ECA; C; 1ECB; # LATIN CAPITAL LETTER I WITH DOT BELOW
+1ECC; C; 1ECD; # LATIN CAPITAL LETTER O WITH DOT BELOW
+1ECE; C; 1ECF; # LATIN CAPITAL LETTER O WITH HOOK ABOVE
+1ED0; C; 1ED1; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE
+1ED2; C; 1ED3; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE
+1ED4; C; 1ED5; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
+1ED6; C; 1ED7; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE
+1ED8; C; 1ED9; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW
+1EDA; C; 1EDB; # LATIN CAPITAL LETTER O WITH HORN AND ACUTE
+1EDC; C; 1EDD; # LATIN CAPITAL LETTER O WITH HORN AND GRAVE
+1EDE; C; 1EDF; # LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE
+1EE0; C; 1EE1; # LATIN CAPITAL LETTER O WITH HORN AND TILDE
+1EE2; C; 1EE3; # LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW
+1EE4; C; 1EE5; # LATIN CAPITAL LETTER U WITH DOT BELOW
+1EE6; C; 1EE7; # LATIN CAPITAL LETTER U WITH HOOK ABOVE
+1EE8; C; 1EE9; # LATIN CAPITAL LETTER U WITH HORN AND ACUTE
+1EEA; C; 1EEB; # LATIN CAPITAL LETTER U WITH HORN AND GRAVE
+1EEC; C; 1EED; # LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE
+1EEE; C; 1EEF; # LATIN CAPITAL LETTER U WITH HORN AND TILDE
+1EF0; C; 1EF1; # LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW
+1EF2; C; 1EF3; # LATIN CAPITAL LETTER Y WITH GRAVE
+1EF4; C; 1EF5; # LATIN CAPITAL LETTER Y WITH DOT BELOW
+1EF6; C; 1EF7; # LATIN CAPITAL LETTER Y WITH HOOK ABOVE
+1EF8; C; 1EF9; # LATIN CAPITAL LETTER Y WITH TILDE
+1EFA; C; 1EFB; # LATIN CAPITAL LETTER MIDDLE-WELSH LL
+1EFC; C; 1EFD; # LATIN CAPITAL LETTER MIDDLE-WELSH V
+1EFE; C; 1EFF; # LATIN CAPITAL LETTER Y WITH LOOP
+1F08; C; 1F00; # GREEK CAPITAL LETTER ALPHA WITH PSILI
+1F09; C; 1F01; # GREEK CAPITAL LETTER ALPHA WITH DASIA
+1F0A; C; 1F02; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA
+1F0B; C; 1F03; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA
+1F0C; C; 1F04; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA
+1F0D; C; 1F05; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA
+1F0E; C; 1F06; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI
+1F0F; C; 1F07; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI
+1F18; C; 1F10; # GREEK CAPITAL LETTER EPSILON WITH PSILI
+1F19; C; 1F11; # GREEK CAPITAL LETTER EPSILON WITH DASIA
+1F1A; C; 1F12; # GREEK CAPITAL LETTER EPSILON WITH PSILI AND VARIA
+1F1B; C; 1F13; # GREEK CAPITAL LETTER EPSILON WITH DASIA AND VARIA
+1F1C; C; 1F14; # GREEK CAPITAL LETTER EPSILON WITH PSILI AND OXIA
+1F1D; C; 1F15; # GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA
+1F28; C; 1F20; # GREEK CAPITAL LETTER ETA WITH PSILI
+1F29; C; 1F21; # GREEK CAPITAL LETTER ETA WITH DASIA
+1F2A; C; 1F22; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA
+1F2B; C; 1F23; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA
+1F2C; C; 1F24; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA
+1F2D; C; 1F25; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA
+1F2E; C; 1F26; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI
+1F2F; C; 1F27; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI
+1F38; C; 1F30; # GREEK CAPITAL LETTER IOTA WITH PSILI
+1F39; C; 1F31; # GREEK CAPITAL LETTER IOTA WITH DASIA
+1F3A; C; 1F32; # GREEK CAPITAL LETTER IOTA WITH PSILI AND VARIA
+1F3B; C; 1F33; # GREEK CAPITAL LETTER IOTA WITH DASIA AND VARIA
+1F3C; C; 1F34; # GREEK CAPITAL LETTER IOTA WITH PSILI AND OXIA
+1F3D; C; 1F35; # GREEK CAPITAL LETTER IOTA WITH DASIA AND OXIA
+1F3E; C; 1F36; # GREEK CAPITAL LETTER IOTA WITH PSILI AND PERISPOMENI
+1F3F; C; 1F37; # GREEK CAPITAL LETTER IOTA WITH DASIA AND PERISPOMENI
+1F48; C; 1F40; # GREEK CAPITAL LETTER OMICRON WITH PSILI
+1F49; C; 1F41; # GREEK CAPITAL LETTER OMICRON WITH DASIA
+1F4A; C; 1F42; # GREEK CAPITAL LETTER OMICRON WITH PSILI AND VARIA
+1F4B; C; 1F43; # GREEK CAPITAL LETTER OMICRON WITH DASIA AND VARIA
+1F4C; C; 1F44; # GREEK CAPITAL LETTER OMICRON WITH PSILI AND OXIA
+1F4D; C; 1F45; # GREEK CAPITAL LETTER OMICRON WITH DASIA AND OXIA
+1F50; F; 03C5 0313; # GREEK SMALL LETTER UPSILON WITH PSILI
+1F52; F; 03C5 0313 0300; # GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA
+1F54; F; 03C5 0313 0301; # GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA
+1F56; F; 03C5 0313 0342; # GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI
+1F59; C; 1F51; # GREEK CAPITAL LETTER UPSILON WITH DASIA
+1F5B; C; 1F53; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND VARIA
+1F5D; C; 1F55; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND OXIA
+1F5F; C; 1F57; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND PERISPOMENI
+1F68; C; 1F60; # GREEK CAPITAL LETTER OMEGA WITH PSILI
+1F69; C; 1F61; # GREEK CAPITAL LETTER OMEGA WITH DASIA
+1F6A; C; 1F62; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA
+1F6B; C; 1F63; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA
+1F6C; C; 1F64; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA
+1F6D; C; 1F65; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA
+1F6E; C; 1F66; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI
+1F6F; C; 1F67; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI
+1F80; F; 1F00 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI
+1F81; F; 1F01 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI
+1F82; F; 1F02 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1F83; F; 1F03 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1F84; F; 1F04 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1F85; F; 1F05 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1F86; F; 1F06 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1F87; F; 1F07 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1F88; F; 1F00 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI
+1F88; S; 1F80; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI
+1F89; F; 1F01 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI
+1F89; S; 1F81; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI
+1F8A; F; 1F02 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F8A; S; 1F82; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F8B; F; 1F03 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F8B; S; 1F83; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F8C; F; 1F04 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F8C; S; 1F84; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F8D; F; 1F05 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F8D; S; 1F85; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F8E; F; 1F06 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F8E; S; 1F86; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F8F; F; 1F07 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F8F; S; 1F87; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F90; F; 1F20 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI
+1F91; F; 1F21 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI
+1F92; F; 1F22 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1F93; F; 1F23 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1F94; F; 1F24 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1F95; F; 1F25 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1F96; F; 1F26 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1F97; F; 1F27 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1F98; F; 1F20 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI
+1F98; S; 1F90; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI
+1F99; F; 1F21 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI
+1F99; S; 1F91; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI
+1F9A; F; 1F22 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F9A; S; 1F92; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F9B; F; 1F23 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F9B; S; 1F93; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F9C; F; 1F24 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F9C; S; 1F94; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F9D; F; 1F25 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F9D; S; 1F95; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F9E; F; 1F26 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F9E; S; 1F96; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F9F; F; 1F27 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F9F; S; 1F97; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FA0; F; 1F60 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI
+1FA1; F; 1F61 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI
+1FA2; F; 1F62 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1FA3; F; 1F63 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1FA4; F; 1F64 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1FA5; F; 1F65 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1FA6; F; 1F66 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1FA7; F; 1F67 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1FA8; F; 1F60 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI
+1FA8; S; 1FA0; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI
+1FA9; F; 1F61 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI
+1FA9; S; 1FA1; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI
+1FAA; F; 1F62 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1FAA; S; 1FA2; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1FAB; F; 1F63 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1FAB; S; 1FA3; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1FAC; F; 1F64 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1FAC; S; 1FA4; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1FAD; F; 1F65 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1FAD; S; 1FA5; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1FAE; F; 1F66 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1FAE; S; 1FA6; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1FAF; F; 1F67 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FAF; S; 1FA7; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FB2; F; 1F70 03B9; # GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI
+1FB3; F; 03B1 03B9; # GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI
+1FB4; F; 03AC 03B9; # GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI
+1FB6; F; 03B1 0342; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI
+1FB7; F; 03B1 0342 03B9; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FB8; C; 1FB0; # GREEK CAPITAL LETTER ALPHA WITH VRACHY
+1FB9; C; 1FB1; # GREEK CAPITAL LETTER ALPHA WITH MACRON
+1FBA; C; 1F70; # GREEK CAPITAL LETTER ALPHA WITH VARIA
+1FBB; C; 1F71; # GREEK CAPITAL LETTER ALPHA WITH OXIA
+1FBC; F; 03B1 03B9; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI
+1FBC; S; 1FB3; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI
+1FBE; C; 03B9; # GREEK PROSGEGRAMMENI
+1FC2; F; 1F74 03B9; # GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI
+1FC3; F; 03B7 03B9; # GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI
+1FC4; F; 03AE 03B9; # GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI
+1FC6; F; 03B7 0342; # GREEK SMALL LETTER ETA WITH PERISPOMENI
+1FC7; F; 03B7 0342 03B9; # GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FC8; C; 1F72; # GREEK CAPITAL LETTER EPSILON WITH VARIA
+1FC9; C; 1F73; # GREEK CAPITAL LETTER EPSILON WITH OXIA
+1FCA; C; 1F74; # GREEK CAPITAL LETTER ETA WITH VARIA
+1FCB; C; 1F75; # GREEK CAPITAL LETTER ETA WITH OXIA
+1FCC; F; 03B7 03B9; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI
+1FCC; S; 1FC3; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI
+1FD2; F; 03B9 0308 0300; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA
+1FD3; F; 03B9 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA
+1FD6; F; 03B9 0342; # GREEK SMALL LETTER IOTA WITH PERISPOMENI
+1FD7; F; 03B9 0308 0342; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI
+1FD8; C; 1FD0; # GREEK CAPITAL LETTER IOTA WITH VRACHY
+1FD9; C; 1FD1; # GREEK CAPITAL LETTER IOTA WITH MACRON
+1FDA; C; 1F76; # GREEK CAPITAL LETTER IOTA WITH VARIA
+1FDB; C; 1F77; # GREEK CAPITAL LETTER IOTA WITH OXIA
+1FE2; F; 03C5 0308 0300; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA
+1FE3; F; 03C5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA
+1FE4; F; 03C1 0313; # GREEK SMALL LETTER RHO WITH PSILI
+1FE6; F; 03C5 0342; # GREEK SMALL LETTER UPSILON WITH PERISPOMENI
+1FE7; F; 03C5 0308 0342; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI
+1FE8; C; 1FE0; # GREEK CAPITAL LETTER UPSILON WITH VRACHY
+1FE9; C; 1FE1; # GREEK CAPITAL LETTER UPSILON WITH MACRON
+1FEA; C; 1F7A; # GREEK CAPITAL LETTER UPSILON WITH VARIA
+1FEB; C; 1F7B; # GREEK CAPITAL LETTER UPSILON WITH OXIA
+1FEC; C; 1FE5; # GREEK CAPITAL LETTER RHO WITH DASIA
+1FF2; F; 1F7C 03B9; # GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI
+1FF3; F; 03C9 03B9; # GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI
+1FF4; F; 03CE 03B9; # GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI
+1FF6; F; 03C9 0342; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI
+1FF7; F; 03C9 0342 03B9; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FF8; C; 1F78; # GREEK CAPITAL LETTER OMICRON WITH VARIA
+1FF9; C; 1F79; # GREEK CAPITAL LETTER OMICRON WITH OXIA
+1FFA; C; 1F7C; # GREEK CAPITAL LETTER OMEGA WITH VARIA
+1FFB; C; 1F7D; # GREEK CAPITAL LETTER OMEGA WITH OXIA
+1FFC; F; 03C9 03B9; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI
+1FFC; S; 1FF3; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI
+2126; C; 03C9; # OHM SIGN
+212A; C; 006B; # KELVIN SIGN
+212B; C; 00E5; # ANGSTROM SIGN
+2132; C; 214E; # TURNED CAPITAL F
+2160; C; 2170; # ROMAN NUMERAL ONE
+2161; C; 2171; # ROMAN NUMERAL TWO
+2162; C; 2172; # ROMAN NUMERAL THREE
+2163; C; 2173; # ROMAN NUMERAL FOUR
+2164; C; 2174; # ROMAN NUMERAL FIVE
+2165; C; 2175; # ROMAN NUMERAL SIX
+2166; C; 2176; # ROMAN NUMERAL SEVEN
+2167; C; 2177; # ROMAN NUMERAL EIGHT
+2168; C; 2178; # ROMAN NUMERAL NINE
+2169; C; 2179; # ROMAN NUMERAL TEN
+216A; C; 217A; # ROMAN NUMERAL ELEVEN
+216B; C; 217B; # ROMAN NUMERAL TWELVE
+216C; C; 217C; # ROMAN NUMERAL FIFTY
+216D; C; 217D; # ROMAN NUMERAL ONE HUNDRED
+216E; C; 217E; # ROMAN NUMERAL FIVE HUNDRED
+216F; C; 217F; # ROMAN NUMERAL ONE THOUSAND
+2183; C; 2184; # ROMAN NUMERAL REVERSED ONE HUNDRED
+24B6; C; 24D0; # CIRCLED LATIN CAPITAL LETTER A
+24B7; C; 24D1; # CIRCLED LATIN CAPITAL LETTER B
+24B8; C; 24D2; # CIRCLED LATIN CAPITAL LETTER C
+24B9; C; 24D3; # CIRCLED LATIN CAPITAL LETTER D
+24BA; C; 24D4; # CIRCLED LATIN CAPITAL LETTER E
+24BB; C; 24D5; # CIRCLED LATIN CAPITAL LETTER F
+24BC; C; 24D6; # CIRCLED LATIN CAPITAL LETTER G
+24BD; C; 24D7; # CIRCLED LATIN CAPITAL LETTER H
+24BE; C; 24D8; # CIRCLED LATIN CAPITAL LETTER I
+24BF; C; 24D9; # CIRCLED LATIN CAPITAL LETTER J
+24C0; C; 24DA; # CIRCLED LATIN CAPITAL LETTER K
+24C1; C; 24DB; # CIRCLED LATIN CAPITAL LETTER L
+24C2; C; 24DC; # CIRCLED LATIN CAPITAL LETTER M
+24C3; C; 24DD; # CIRCLED LATIN CAPITAL LETTER N
+24C4; C; 24DE; # CIRCLED LATIN CAPITAL LETTER O
+24C5; C; 24DF; # CIRCLED LATIN CAPITAL LETTER P
+24C6; C; 24E0; # CIRCLED LATIN CAPITAL LETTER Q
+24C7; C; 24E1; # CIRCLED LATIN CAPITAL LETTER R
+24C8; C; 24E2; # CIRCLED LATIN CAPITAL LETTER S
+24C9; C; 24E3; # CIRCLED LATIN CAPITAL LETTER T
+24CA; C; 24E4; # CIRCLED LATIN CAPITAL LETTER U
+24CB; C; 24E5; # CIRCLED LATIN CAPITAL LETTER V
+24CC; C; 24E6; # CIRCLED LATIN CAPITAL LETTER W
+24CD; C; 24E7; # CIRCLED LATIN CAPITAL LETTER X
+24CE; C; 24E8; # CIRCLED LATIN CAPITAL LETTER Y
+24CF; C; 24E9; # CIRCLED LATIN CAPITAL LETTER Z
+2C00; C; 2C30; # GLAGOLITIC CAPITAL LETTER AZU
+2C01; C; 2C31; # GLAGOLITIC CAPITAL LETTER BUKY
+2C02; C; 2C32; # GLAGOLITIC CAPITAL LETTER VEDE
+2C03; C; 2C33; # GLAGOLITIC CAPITAL LETTER GLAGOLI
+2C04; C; 2C34; # GLAGOLITIC CAPITAL LETTER DOBRO
+2C05; C; 2C35; # GLAGOLITIC CAPITAL LETTER YESTU
+2C06; C; 2C36; # GLAGOLITIC CAPITAL LETTER ZHIVETE
+2C07; C; 2C37; # GLAGOLITIC CAPITAL LETTER DZELO
+2C08; C; 2C38; # GLAGOLITIC CAPITAL LETTER ZEMLJA
+2C09; C; 2C39; # GLAGOLITIC CAPITAL LETTER IZHE
+2C0A; C; 2C3A; # GLAGOLITIC CAPITAL LETTER INITIAL IZHE
+2C0B; C; 2C3B; # GLAGOLITIC CAPITAL LETTER I
+2C0C; C; 2C3C; # GLAGOLITIC CAPITAL LETTER DJERVI
+2C0D; C; 2C3D; # GLAGOLITIC CAPITAL LETTER KAKO
+2C0E; C; 2C3E; # GLAGOLITIC CAPITAL LETTER LJUDIJE
+2C0F; C; 2C3F; # GLAGOLITIC CAPITAL LETTER MYSLITE
+2C10; C; 2C40; # GLAGOLITIC CAPITAL LETTER NASHI
+2C11; C; 2C41; # GLAGOLITIC CAPITAL LETTER ONU
+2C12; C; 2C42; # GLAGOLITIC CAPITAL LETTER POKOJI
+2C13; C; 2C43; # GLAGOLITIC CAPITAL LETTER RITSI
+2C14; C; 2C44; # GLAGOLITIC CAPITAL LETTER SLOVO
+2C15; C; 2C45; # GLAGOLITIC CAPITAL LETTER TVRIDO
+2C16; C; 2C46; # GLAGOLITIC CAPITAL LETTER UKU
+2C17; C; 2C47; # GLAGOLITIC CAPITAL LETTER FRITU
+2C18; C; 2C48; # GLAGOLITIC CAPITAL LETTER HERU
+2C19; C; 2C49; # GLAGOLITIC CAPITAL LETTER OTU
+2C1A; C; 2C4A; # GLAGOLITIC CAPITAL LETTER PE
+2C1B; C; 2C4B; # GLAGOLITIC CAPITAL LETTER SHTA
+2C1C; C; 2C4C; # GLAGOLITIC CAPITAL LETTER TSI
+2C1D; C; 2C4D; # GLAGOLITIC CAPITAL LETTER CHRIVI
+2C1E; C; 2C4E; # GLAGOLITIC CAPITAL LETTER SHA
+2C1F; C; 2C4F; # GLAGOLITIC CAPITAL LETTER YERU
+2C20; C; 2C50; # GLAGOLITIC CAPITAL LETTER YERI
+2C21; C; 2C51; # GLAGOLITIC CAPITAL LETTER YATI
+2C22; C; 2C52; # GLAGOLITIC CAPITAL LETTER SPIDERY HA
+2C23; C; 2C53; # GLAGOLITIC CAPITAL LETTER YU
+2C24; C; 2C54; # GLAGOLITIC CAPITAL LETTER SMALL YUS
+2C25; C; 2C55; # GLAGOLITIC CAPITAL LETTER SMALL YUS WITH TAIL
+2C26; C; 2C56; # GLAGOLITIC CAPITAL LETTER YO
+2C27; C; 2C57; # GLAGOLITIC CAPITAL LETTER IOTATED SMALL YUS
+2C28; C; 2C58; # GLAGOLITIC CAPITAL LETTER BIG YUS
+2C29; C; 2C59; # GLAGOLITIC CAPITAL LETTER IOTATED BIG YUS
+2C2A; C; 2C5A; # GLAGOLITIC CAPITAL LETTER FITA
+2C2B; C; 2C5B; # GLAGOLITIC CAPITAL LETTER IZHITSA
+2C2C; C; 2C5C; # GLAGOLITIC CAPITAL LETTER SHTAPIC
+2C2D; C; 2C5D; # GLAGOLITIC CAPITAL LETTER TROKUTASTI A
+2C2E; C; 2C5E; # GLAGOLITIC CAPITAL LETTER LATINATE MYSLITE
+2C60; C; 2C61; # LATIN CAPITAL LETTER L WITH DOUBLE BAR
+2C62; C; 026B; # LATIN CAPITAL LETTER L WITH MIDDLE TILDE
+2C63; C; 1D7D; # LATIN CAPITAL LETTER P WITH STROKE
+2C64; C; 027D; # LATIN CAPITAL LETTER R WITH TAIL
+2C67; C; 2C68; # LATIN CAPITAL LETTER H WITH DESCENDER
+2C69; C; 2C6A; # LATIN CAPITAL LETTER K WITH DESCENDER
+2C6B; C; 2C6C; # LATIN CAPITAL LETTER Z WITH DESCENDER
+2C6D; C; 0251; # LATIN CAPITAL LETTER ALPHA
+2C6E; C; 0271; # LATIN CAPITAL LETTER M WITH HOOK
+2C6F; C; 0250; # LATIN CAPITAL LETTER TURNED A
+2C70; C; 0252; # LATIN CAPITAL LETTER TURNED ALPHA
+2C72; C; 2C73; # LATIN CAPITAL LETTER W WITH HOOK
+2C75; C; 2C76; # LATIN CAPITAL LETTER HALF H
From pypy.commits at gmail.com Thu Aug 11 11:49:27 2016
From: pypy.commits at gmail.com (marky1991)
Date: Thu, 11 Aug 2016 08:49:27 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Fixed translation for freebsd.
Message-ID: <57ac9e87.44ce1c0a.7a3bd.2997@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r86149:575f3ed5760a
Date: 2016-08-02 21:40 +0000
http://bitbucket.org/pypy/pypy/changeset/575f3ed5760a/
Log: Fixed translation for freebsd.
diff --git a/pypy/module/_posixsubprocess/_posixsubprocess.c b/pypy/module/_posixsubprocess/_posixsubprocess.c
--- a/pypy/module/_posixsubprocess/_posixsubprocess.c
+++ b/pypy/module/_posixsubprocess/_posixsubprocess.c
@@ -12,7 +12,7 @@
#ifdef HAVE_SYS_TYPES_H
#include
#endif
-#if defined(HAVE_SYS_STAT_H) && defined(__FreeBSD__)
+#if defined(__FreeBSD__)
#include
#endif
#ifdef HAVE_SYS_SYSCALL_H
From pypy.commits at gmail.com Thu Aug 11 11:49:31 2016
From: pypy.commits at gmail.com (marky1991)
Date: Thu, 11 Aug 2016 08:49:31 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Reapply the fix from issue 2348.
Translation works now.
Message-ID: <57ac9e8b.497bc20a.13214.23d7@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r86151:94394e83a162
Date: 2016-08-09 16:17 +0000
http://bitbucket.org/pypy/pypy/changeset/94394e83a162/
Log: Reapply the fix from issue 2348. Translation works now.
diff --git a/rpython/translator/c/src/precommondefs.h b/rpython/translator/c/src/precommondefs.h
--- a/rpython/translator/c/src/precommondefs.h
+++ b/rpython/translator/c/src/precommondefs.h
@@ -20,7 +20,7 @@
#define _NETBSD_SOURCE 1
/* Define to activate features from IEEE Stds 1003.1-2001 */
#ifndef _POSIX_C_SOURCE
-# define _POSIX_C_SOURCE 200112L
+# define _POSIX_C_SOURCE 200809L
#endif
/* Define on FreeBSD to activate all library features */
#define __BSD_VISIBLE 1
From pypy.commits at gmail.com Thu Aug 11 11:49:29 2016
From: pypy.commits at gmail.com (marky1991)
Date: Thu, 11 Aug 2016 08:49:29 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Reduce diff with cpython. (thanks for
pointing this out, David\!)
Message-ID: <57ac9e89.8cc51c0a.8af16.43a4@mx.google.com>
Author: Mark Young
Branch: py3k
Changeset: r86150:51fa15119324
Date: 2016-08-04 11:08 -0400
http://bitbucket.org/pypy/pypy/changeset/51fa15119324/
Log: Reduce diff with cpython. (thanks for pointing this out, David\!)
diff --git a/pypy/module/_posixsubprocess/_posixsubprocess.c b/pypy/module/_posixsubprocess/_posixsubprocess.c
--- a/pypy/module/_posixsubprocess/_posixsubprocess.c
+++ b/pypy/module/_posixsubprocess/_posixsubprocess.c
@@ -12,7 +12,7 @@
#ifdef HAVE_SYS_TYPES_H
#include
#endif
-#if defined(__FreeBSD__)
+#if defined(HAVE_SYS_STAT_H) && defined(__FreeBSD__)
#include
#endif
#ifdef HAVE_SYS_SYSCALL_H
diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py
--- a/pypy/module/_posixsubprocess/interp_subprocess.py
+++ b/pypy/module/_posixsubprocess/interp_subprocess.py
@@ -15,8 +15,9 @@
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
- includes=['unistd.h', 'sys/syscall.h'])
+ includes=['unistd.h', 'sys/syscall.h', 'sys/stat.h'])
HAVE_SYS_SYSCALL_H = platform.Has("syscall")
+ HAVE_SYS_STAT_H = platform.Has("stat")
HAVE_SETSID = platform.Has("setsid")
config = platform.configure(CConfig)
@@ -29,6 +30,8 @@
compile_extra = []
if config['HAVE_SYS_SYSCALL_H']:
compile_extra.append("-DHAVE_SYS_SYSCALL_H")
+if config['HAVE_SYS_STAT_H']:
+ compile_extra.append("-DHAVE_SYS_STAT_H")
if config['HAVE_SETSID']:
compile_extra.append("-DHAVE_SETSID")
From pypy.commits at gmail.com Thu Aug 11 12:02:24 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 09:02:24 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Threads: in-progress
Message-ID: <57aca190.c3881c0a.282f.49ea@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86153:a852dc954edc
Date: 2016-08-11 18:01 +0200
http://bitbucket.org/pypy/pypy/changeset/a852dc954edc/
Log: Threads: in-progress
diff --git a/pypy/interpreter/reverse_debugging.py b/pypy/interpreter/reverse_debugging.py
--- a/pypy/interpreter/reverse_debugging.py
+++ b/pypy/interpreter/reverse_debugging.py
@@ -591,6 +591,13 @@
# This class is tweaked to generate one byte per _SIG_TICKER_COUNT
# bytecodes, at the expense of not reacting to signals instantly.
+ # Threads: after 10'000 calls to decrement_ticker(), it should
+ # return -1. It should also return -1 if there was a signal.
+ # This is done by calling _update_ticker_from_signals() every 100
+ # calls, and invoking rsignal.pypysig_check_and_reset(); this in
+ # turn returns -1 if there was a signal or if it was called 100
+ # times.
+
_SIG_TICKER_COUNT = 100
_ticker = 0
_ticker_count = _SIG_TICKER_COUNT * 10
@@ -610,10 +617,10 @@
if c < 0:
c = self._update_ticker_from_signals()
self._ticker_count = c
- if self.has_bytecode_counter: # this 'if' is constant-folded
- print ("RDBSignalActionFlag: has_bytecode_counter: "
- "not supported for now")
- raise NotImplementedError
+ #if self.has_bytecode_counter: # this 'if' is constant-folded
+ # print ("RDBSignalActionFlag: has_bytecode_counter: "
+ # "not supported for now")
+ # raise NotImplementedError
return self._ticker
def _update_ticker_from_signals(self):
diff --git a/rpython/translator/c/src/signals.h b/rpython/translator/c/src/signals.h
--- a/rpython/translator/c/src/signals.h
+++ b/rpython/translator/c/src/signals.h
@@ -39,8 +39,9 @@
inline static char pypysig_check_and_reset(void) {
/* used by reverse_debugging */
- char result = pypysig_counter.value < 0;
- pypysig_counter.value = 0;
+ char result = --pypysig_counter.value < 0;
+ if (result)
+ pypysig_counter.value = 100;
return result;
}
From pypy.commits at gmail.com Thu Aug 11 12:07:43 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 09:07:43 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Non-passing test
Message-ID: <57aca2cf.411d1c0a.db3ce.3059@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86154:1793bd4d7a98
Date: 2016-08-11 18:06 +0200
http://bitbucket.org/pypy/pypy/changeset/1793bd4d7a98/
Log: Non-passing test
diff --git a/rpython/translator/revdb/test/test_thread.py b/rpython/translator/revdb/test/test_thread.py
--- a/rpython/translator/revdb/test/test_thread.py
+++ b/rpython/translator/revdb/test/test_thread.py
@@ -78,6 +78,64 @@
rdb.write_call("AAAA\n")
rdb.done()
+ def test_threadlocal(self):
+ class EC(object):
+ def __init__(self, value):
+ self.value = value
+ raw_thread_local = rthread.ThreadLocalReference(EC)
+
+ def bootstrap():
+ rthread.gc_thread_start()
+ _sleep(1)
+ ec = EC(4567)
+ raw_thread_local.set(ec)
+ print raw_thread_local.get().value
+ assert raw_thread_local.get() is ec
+ rthread.gc_thread_die()
+
+ def main(argv):
+ ec = EC(12)
+ raw_thread_local.set(ec)
+ rthread.start_new_thread(bootstrap, ())
+ _sleep(2)
+ print raw_thread_local.get().value
+ assert raw_thread_local.get() is ec
+ return 9
+
+ self.compile(main, backendopt=False, thread=True)
+ out = self.run('Xx')
+ # should have printed 4567 and 12
+ rdb = self.fetch_rdb([self.exename, 'Xx'])
+ th_A = rdb.main_thread_id
+ rdb.same_stack() # RPyGilAllocate()
+ rdb.gil_release()
+
+ th_B = rdb.switch_thread()
+ assert th_B != th_A
+ b = rdb.next('!h'); assert 300 <= b < 310 # "callback": start thread
+ rdb.gil_acquire()
+ rdb.gil_release()
+
+ rdb.switch_thread(th_A)
+ rdb.same_stack() # start_new_thread returns
+ x = rdb.next(); assert x == th_B # result is the 'th_B' id
+ rdb.gil_acquire()
+ rdb.gil_release()
+
+ rdb.switch_thread(th_B)
+ rdb.same_stack() # sleep() (finishes here)
+ rdb.next('i') # sleep()
+ rdb.gil_acquire()
+ rdb.write_call("4567\n")
+ rdb.gil_release()
+
+ rdb.switch_thread(th_A)
+ rdb.same_stack() # sleep()
+ rdb.next('i') # sleep()
+ rdb.gil_acquire()
+ rdb.write_call("12\n")
+ rdb.done()
+
class TestThreadInteractive(InteractiveTests):
expected_stop_points = 5
From pypy.commits at gmail.com Thu Aug 11 12:10:54 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 09:10:54 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Fix test
Message-ID: <57aca38e.c15e1c0a.983f3.4d78@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86155:fb9456b9dfb4
Date: 2016-08-11 18:10 +0200
http://bitbucket.org/pypy/pypy/changeset/fb9456b9dfb4/
Log: Fix test
diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py
--- a/rpython/rlib/rthread.py
+++ b/rpython/rlib/rthread.py
@@ -366,7 +366,6 @@
ThreadLocalReference._COUNT += 1
ThreadLocalField.__init__(self, lltype.Signed, 'tlref%d' % unique_id,
loop_invariant=loop_invariant)
- setraw = self.setraw
offset = self._offset
def get():
@@ -383,10 +382,10 @@
def set(value):
assert isinstance(value, Cls) or value is None
if we_are_translated():
- from rpython.rtyper.annlowlevel import cast_instance_to_gcref
- gcref = cast_instance_to_gcref(value)
- value = lltype.cast_ptr_to_int(gcref)
- setraw(value)
+ from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr
+ ptr = cast_instance_to_base_ptr(value)
+ _threadlocalref_seeme(self)
+ llop.threadlocalref_store(lltype.Void, offset, ptr)
rgc.register_custom_trace_hook(TRACETLREF, _lambda_trace_tlref)
rgc.ll_writebarrier(_tracetlref_obj)
else:
From pypy.commits at gmail.com Thu Aug 11 13:29:54 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Thu, 11 Aug 2016 10:29:54 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Fix and rename
test_crap_after_starargs (allowed in PEP 448),
dirty fix in function calls if argument order is reversed (happens if
stararg occurs after kwarg)
Message-ID: <57acb612.411d1c0a.db3ce.4ba6@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86156:ab59452c8103
Date: 2016-08-11 19:29 +0200
http://bitbucket.org/pypy/pypy/changeset/ab59452c8103/
Log: Fix and rename test_crap_after_starargs (allowed in PEP 448), dirty
fix in function calls if argument order is reversed (happens if
stararg occurs after kwarg)
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -779,9 +779,19 @@
py.test.raises(SyntaxError, self.simple_test, "int(base=10, '2')",
None, None)
- def test_crap_after_starargs(self):
- source = "call(*args, *args)"
- py.test.raises(SyntaxError, self.simple_test, source, None, None)
+ def test_starargs_after_starargs(self):
+ #allowed since PEP 448 "Additional Unpacking Generalizations"
+ source = py.code.Source("""
+ def call(*arg):
+ ret = []
+ for i in arg:
+ ret.append(i)
+ return ret
+
+ args = [4,5,6]
+ res = call(*args, *args)
+ """)
+ self.simple_test(source, 'res', [4,5,6,4,5,6])
def test_not_a_name(self):
source = "call(a, b, c, 3=3)"
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1214,6 +1214,14 @@
break
w_value = self.popvalue()
w_key = self.popvalue()
+ # temporary (dirty) fix: if star-arg occurs after kwarg,
+ # arg order is reversed on stack
+ from pypy.objspace.std.listobject import W_ListObject
+ if isinstance(w_key, W_ListObject):
+ w_key_temp = w_key
+ w_key = w_value
+ w_value = w_star
+ w_star = w_key_temp
key = self.space.identifier_w(w_key)
keywords[n_keywords] = key
keywords_w[n_keywords] = w_value
From pypy.commits at gmail.com Thu Aug 11 13:42:55 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Thu, 11 Aug 2016 10:42:55 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Fix compiler test (positional
arguments can follow starred arguments)
Message-ID: <57acb91f.271ac20a.d0248.491d@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86157:18060b1572d4
Date: 2016-08-11 19:42 +0200
http://bitbucket.org/pypy/pypy/changeset/18060b1572d4/
Log: Fix compiler test (positional arguments can follow starred
arguments)
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1075,6 +1075,13 @@
return a, b, c
"""
yield self.st, func, "f()", (1, [2, 3], 4)
+ func = """def f():
+ b = [4,5,6]
+ c = 7
+ a = [*b, c]
+ return a
+ """
+ yield self.st, func, "f()", [4, 5, 6, 7]
def test_extended_unpacking_fail(self):
exc = py.test.raises(SyntaxError, self.simple_test, "*a, *b = [1, 2]",
@@ -1084,9 +1091,6 @@
"[*b, *c] = range(10)", None, None).value
assert exc.msg == "two starred expressions in assignment"
- exc = py.test.raises(SyntaxError, self.simple_test, "a = [*b, c]",
- None, None).value
- assert exc.msg == "can use starred expression only as assignment target"
exc = py.test.raises(SyntaxError, self.simple_test, "for *a in x: pass",
None, None).value
assert exc.msg == "starred assignment target must be in a list or tuple"
From pypy.commits at gmail.com Thu Aug 11 13:58:04 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 10:58:04 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Hopefully correct handling of
thread-locals
Message-ID: <57acbcac.8f8e1c0a.8df7f.6bfe@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86158:0981c3747dd3
Date: 2016-08-11 19:57 +0200
http://bitbucket.org/pypy/pypy/changeset/0981c3747dd3/
Log: Hopefully correct handling of thread-locals
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py
--- a/rpython/translator/c/genc.py
+++ b/rpython/translator/c/genc.py
@@ -778,6 +778,10 @@
for field in fields:
print >> f, ('#define RPY_TLOFS_%s offsetof(' % field.fieldname +
'struct pypy_threadlocal_s, %s)' % field.fieldname)
+ if fields:
+ print >> f, '#define RPY_TLOFSFIRST RPY_TLOFS_%s' % fields[0].fieldname
+ else:
+ print >> f, '#define RPY_TLOFSFIRST sizeof(struct pypy_threadlocal_s)'
print >> f, 'struct pypy_threadlocal_s {'
print >> f, '\tint ready;'
print >> f, '\tchar *stack_end;'
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -26,6 +26,7 @@
#include "revdb_def.h"
#include "src/rtyper.h"
#include "src/mem.h"
+#include "src/threadlocal.h"
#include "src-revdb/revdb_include.h"
#define RDB_SIGNATURE "RevDB:"
@@ -673,6 +674,7 @@
struct replay_thread_s {
uint64_t tid;
stacklet_handle h;
+ struct pypy_threadlocal_s tloc;
};
static stacklet_handle replay_thread_main(stacklet_handle h, void *arg)
@@ -737,6 +739,7 @@
can switch it away at any point later */
struct replay_thread_main_s m;
stacklet_handle h;
+ struct pypy_threadlocal_s *real_tloc = NULL;
m.entry_point = entry_point;
m.argc = argc;
m.argv = argv;
@@ -750,6 +753,10 @@
while (1) {
struct replay_thread_s *node, **item, dummy;
+ if (real_tloc == NULL) {
+ _OP_THREADLOCALREF_ADDR_SIGHANDLER(real_tloc);
+ }
+
if (h == NULL)
goto out_of_memory;
@@ -761,6 +768,12 @@
goto out_of_memory;
node->tid = current_thread_id;
node->h = h;
+ /* save the thread-locals, if any */
+ if (real_tloc != NULL)
+ node->tloc = *real_tloc;
+ else
+ memset(&node->tloc, 0, sizeof(node->tloc));
+
item = tsearch(node, &thread_tree_root, compare_replay_thread);
if (item == NULL)
goto out_of_memory;
@@ -780,6 +793,9 @@
item = tfind(&dummy, &thread_tree_root, compare_replay_thread);
if (item == NULL) {
/* it's a new thread, start it now */
+ if (real_tloc != NULL)
+ memset(((char *)real_tloc) + RPY_TLOFSFIRST, 0,
+ sizeof(struct pypy_threadlocal_s) - RPY_TLOFSFIRST);
h = stacklet_new(st_thread, replay_thread_sub, NULL);
}
else {
@@ -787,6 +803,8 @@
assert(node->tid == target_thread_id);
h = node->h;
tdelete(node, &thread_tree_root, compare_replay_thread);
+ if (real_tloc != NULL)
+ *real_tloc = node->tloc;
free(node);
h = stacklet_switch(h);
@@ -1068,7 +1086,10 @@
target_thread_id = fetch_async_block();
_RPY_REVDB_PRINT("[THRD]", target_thread_id);
rpy_revdb.buf_limit = rpy_revdb.buf_p;
- st_outer_controller_h = stacklet_switch(st_outer_controller_h);
+ if (target_thread_id != current_thread_id) {
+ st_outer_controller_h = stacklet_switch(
+ st_outer_controller_h);
+ }
if (rpy_revdb.buf_limit == rpy_revdb.buf_p)
rpy_reverse_db_fetch(__FILE__, __LINE__);
return;
diff --git a/rpython/translator/revdb/test/test_thread.py b/rpython/translator/revdb/test/test_thread.py
--- a/rpython/translator/revdb/test/test_thread.py
+++ b/rpython/translator/revdb/test/test_thread.py
@@ -169,3 +169,41 @@
child.expect(ANSWER_READY, i, Ellipsis)
child.send(Message(CMD_FORWARD, 1))
child.expect(ANSWER_AT_END)
+
+
+class TestThreadLocal(InteractiveTests):
+ expected_stop_points = 1
+
+ def setup_class(cls):
+ from rpython.translator.revdb.test.test_basic import compile, run
+ class EC(object):
+ def __init__(self, value):
+ self.value = value
+ raw_thread_local = rthread.ThreadLocalReference(EC)
+
+ def bootstrap():
+ rthread.gc_thread_start()
+ _sleep(1)
+ ec = EC(4567)
+ raw_thread_local.set(ec)
+ revdb.stop_point()
+ print raw_thread_local.get().value
+ assert raw_thread_local.get() is ec
+ rthread.gc_thread_die()
+
+ def main(argv):
+ ec = EC(12)
+ raw_thread_local.set(ec)
+ rthread.start_new_thread(bootstrap, ())
+ _sleep(2)
+ print raw_thread_local.get().value
+ assert raw_thread_local.get() is ec
+ return 9
+
+ compile(cls, main, backendopt=False, thread=True)
+ assert run(cls, '') == '4567\n12\n'
+
+ def test_go_threadlocal(self):
+ child = self.replay()
+ child.send(Message(CMD_FORWARD, 1))
+ child.expect(ANSWER_AT_END)
From pypy.commits at gmail.com Thu Aug 11 14:34:07 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Thu, 11 Aug 2016 11:34:07 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Merge with py3.5-async
Message-ID: <57acc51f.c310c20a.6b7b5.5afb@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r86159:83d383a3859c
Date: 2016-08-11 20:31 +0200
http://bitbucket.org/pypy/pypy/changeset/83d383a3859c/
Log: Merge with py3.5-async
diff too long, truncating to 2000 out of 40779 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -26,3 +26,4 @@
40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3
+7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -85,10 +85,11 @@
pass
def splitline(line, re_word = re.compile(r'[^\s"]\S*|["]["]|["].*?[^\\]["]')):
+ import ast
result = []
for word in re_word.findall(line):
if word.startswith('"'):
- word = eval(word)
+ word = ast.literal_eval(word)
result.append(word)
return result
diff --git a/lib-python/2.7/test/test_hash.py b/lib-python/2.7/test/test_hash.py
--- a/lib-python/2.7/test/test_hash.py
+++ b/lib-python/2.7/test/test_hash.py
@@ -174,7 +174,7 @@
class StringlikeHashRandomizationTests(HashRandomizationTests):
if check_impl_detail(pypy=True):
- EMPTY_STRING_HASH = -1
+ EMPTY_STRING_HASH = -2
else:
EMPTY_STRING_HASH = 0
diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py
--- a/lib-python/3/opcode.py
+++ b/lib-python/3/opcode.py
@@ -85,7 +85,10 @@
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
-def_op('STORE_MAP', 54)
+def_op('GET_AITER', 50)
+def_op('GET_ANEXT', 51)
+def_op('BEFORE_ASYNC_WITH', 52)
+
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
@@ -100,11 +103,12 @@
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
-def_op('STORE_LOCALS', 69)
+def_op('GET_YIELD_FROM_ITER', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
+def_op('GET_AWAITABLE', 73)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
@@ -196,6 +200,11 @@
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
+def_op('LOAD_CLASSDEREF', 148)
+hasfree.append(148)
+
+jrel_op('SETUP_ASYNC_WITH', 154)
+
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
diff --git a/lib-python/3/test/test_hash.py b/lib-python/3/test/test_hash.py
--- a/lib-python/3/test/test_hash.py
+++ b/lib-python/3/test/test_hash.py
@@ -198,7 +198,7 @@
class StringlikeHashRandomizationTests(HashRandomizationTests):
if check_impl_detail(pypy=True):
- EMPTY_STRING_HASH = -1
+ EMPTY_STRING_HASH = -2
else:
EMPTY_STRING_HASH = 0
repr_ = None
diff --git a/lib-python/3/test/test_unicode.py b/lib-python/3/test/test_unicode.py
--- a/lib-python/3/test/test_unicode.py
+++ b/lib-python/3/test/test_unicode.py
@@ -2604,7 +2604,8 @@
def test_getnewargs(self):
text = 'abc'
args = text.__getnewargs__()
- self.assertIsNot(args[0], text)
+ if support.check_impl_detail():
+ self.assertIsNot(args[0], text)
self.assertEqual(args[0], text)
self.assertEqual(len(args), 1)
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -418,7 +418,7 @@
RegrTest('test_threading.py', usemodules="thread", core=True),
RegrTest('test_threading_local.py', usemodules="thread", core=True),
RegrTest('test_threadsignals.py', usemodules="thread"),
- RegrTest('test_time.py', core=True, usemodules="struct"),
+ RegrTest('test_time.py', core=True, usemodules="struct thread _rawffi"),
RegrTest('test_timeit.py'),
RegrTest('test_timeout.py'),
RegrTest('test_tk.py'),
@@ -452,7 +452,7 @@
RegrTest('test_userstring.py', core=True),
RegrTest('test_uu.py'),
RegrTest('test_uuid.py'),
- RegrTest('test_venv.py'),
+ RegrTest('test_venv.py', usemodules="struct"),
RegrTest('test_wait3.py', usemodules="thread"),
RegrTest('test_wait4.py', usemodules="thread"),
RegrTest('test_warnings.py', core=True),
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -198,10 +198,13 @@
return tp._alignmentofinstances()
@builtinify
-def byref(cdata):
+def byref(cdata, offset=0):
# "pointer" is imported at the end of this module to avoid circular
# imports
- return pointer(cdata)
+ ptr = pointer(cdata)
+ if offset != 0:
+ ptr._buffer[0] += offset
+ return ptr
def cdata_from_address(self, address):
# fix the address: turn it into as unsigned, in case it's a negative number
diff --git a/lib_pypy/_pypy_winbase_build.py b/lib_pypy/_pypy_winbase_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_build.py
@@ -0,0 +1,91 @@
+# Note: uses the CFFI out-of-line ABI mode. We can't use the API
+# mode because ffi.compile() needs to run the compiler, which
+# needs 'subprocess', which needs 'msvcrt' and '_subprocess',
+# which depend on '_pypy_winbase_cffi' already.
+#
+# Note that if you need to regenerate _pypy_winbase_cffi and
+# can't use a preexisting PyPy to do that, then running this
+# file should work as long as 'subprocess' is not imported
+# by cffi. I had to hack in 'cffi._pycparser' to move an
+#'import subprocess' to the inside of a function. (Also,
+# CPython+CFFI should work as well.)
+#
+# This module supports both msvcrt.py and _subprocess.py.
+
+from cffi import FFI
+
+ffi = FFI()
+
+ffi.set_source("_pypy_winbase_cffi", None)
+
+# ---------- MSVCRT ----------
+
+ffi.cdef("""
+typedef unsigned short wint_t;
+
+int _open_osfhandle(intptr_t osfhandle, int flags);
+intptr_t _get_osfhandle(int fd);
+int _setmode(int fd, int mode);
+int _locking(int fd, int mode, long nbytes);
+
+int _kbhit(void);
+int _getch(void);
+wint_t _getwch(void);
+int _getche(void);
+wint_t _getwche(void);
+int _putch(int);
+wint_t _putwch(wchar_t);
+int _ungetch(int);
+wint_t _ungetwch(wint_t);
+""")
+
+# ---------- SUBPROCESS ----------
+
+ffi.cdef("""
+typedef struct {
+ DWORD cb;
+ char * lpReserved;
+ char * lpDesktop;
+ char * lpTitle;
+ DWORD dwX;
+ DWORD dwY;
+ DWORD dwXSize;
+ DWORD dwYSize;
+ DWORD dwXCountChars;
+ DWORD dwYCountChars;
+ DWORD dwFillAttribute;
+ DWORD dwFlags;
+ WORD wShowWindow;
+ WORD cbReserved2;
+ LPBYTE lpReserved2;
+ HANDLE hStdInput;
+ HANDLE hStdOutput;
+ HANDLE hStdError;
+} STARTUPINFO, *LPSTARTUPINFO;
+
+typedef struct {
+ HANDLE hProcess;
+ HANDLE hThread;
+ DWORD dwProcessId;
+ DWORD dwThreadId;
+} PROCESS_INFORMATION, *LPPROCESS_INFORMATION;
+
+DWORD WINAPI GetVersion(void);
+BOOL WINAPI CreatePipe(PHANDLE, PHANDLE, void *, DWORD);
+BOOL WINAPI CloseHandle(HANDLE);
+HANDLE WINAPI GetCurrentProcess(void);
+BOOL WINAPI DuplicateHandle(HANDLE, HANDLE, HANDLE, LPHANDLE,
+ DWORD, BOOL, DWORD);
+BOOL WINAPI CreateProcessA(char *, char *, void *,
+ void *, BOOL, DWORD, char *,
+ char *, LPSTARTUPINFO, LPPROCESS_INFORMATION);
+DWORD WINAPI WaitForSingleObject(HANDLE, DWORD);
+BOOL WINAPI GetExitCodeProcess(HANDLE, LPDWORD);
+BOOL WINAPI TerminateProcess(HANDLE, UINT);
+HANDLE WINAPI GetStdHandle(DWORD);
+""")
+
+# --------------------
+
+if __name__ == "__main__":
+ ffi.compile()
diff --git a/lib_pypy/_pypy_winbase_cffi.py b/lib_pypy/_pypy_winbase_cffi.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_cffi.py
@@ -0,0 +1,10 @@
+# auto-generated file
+import _cffi_backend
+
+ffi = _cffi_backend.FFI('_pypy_winbase_cffi',
+ _version = 0x2601,
+ _types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x50\x03\x00\x00\x13\x11\x00\x00\x53\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x4F\x03\x00\x00\x4E\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x42\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x52\x03\x00\x00\x04\x01\x00\x00\x00\x01',
+ _globals = (b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x4C\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x49\x23GetStdHandle',0,b'\x00\x00\x3F\x23GetVersion',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x3B\x23WaitForSingleObject',0,b'\x00\x00\x38\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x44\x23_getwch',0,b'\x00\x00\x44\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x46\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x41\x23_ungetwch',0),
+ _struct_unions = ((b'\x00\x00\x00\x4E\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x4F\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x42\x11wShowWindow',b'\x00\x00\x42\x11cbReserved2',b'\x00\x00\x51\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')),
+ _typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x4EPROCESS_INFORMATION',b'\x00\x00\x00\x4FSTARTUPINFO',b'\x00\x00\x00\x42wint_t'),
+)
diff --git a/lib_pypy/_winapi.py b/lib_pypy/_winapi.py
--- a/lib_pypy/_winapi.py
+++ b/lib_pypy/_winapi.py
@@ -10,152 +10,99 @@
# Declare external Win32 functions
-import ctypes
-
-_kernel32 = ctypes.WinDLL('kernel32')
-
-_CloseHandle = _kernel32.CloseHandle
-_CloseHandle.argtypes = [ctypes.c_int]
-_CloseHandle.restype = ctypes.c_int
-
-_CreatePipe = _kernel32.CreatePipe
-_CreatePipe.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
- ctypes.c_void_p, ctypes.c_int]
-_CreatePipe.restype = ctypes.c_int
-
-_GetCurrentProcess = _kernel32.GetCurrentProcess
-_GetCurrentProcess.argtypes = []
-_GetCurrentProcess.restype = ctypes.c_int
+from _pypy_winbase_cffi import ffi as _ffi
+_kernel32 = _ffi.dlopen('kernel32')
GetVersion = _kernel32.GetVersion
-GetVersion.argtypes = []
-GetVersion.restype = ctypes.c_int
-_DuplicateHandle = _kernel32.DuplicateHandle
-_DuplicateHandle.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,
- ctypes.POINTER(ctypes.c_int),
- ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_DuplicateHandle.restype = ctypes.c_int
-_WaitForSingleObject = _kernel32.WaitForSingleObject
-_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_uint]
-_WaitForSingleObject.restype = ctypes.c_int
+# Now the _subprocess module implementation
-_GetExitCodeProcess = _kernel32.GetExitCodeProcess
-_GetExitCodeProcess.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
-_GetExitCodeProcess.restype = ctypes.c_int
+def _WinError():
+ code, message = _ffi.getwinerror()
+ raise WindowsError(code, message)
-_TerminateProcess = _kernel32.TerminateProcess
-_TerminateProcess.argtypes = [ctypes.c_int, ctypes.c_int]
-_TerminateProcess.restype = ctypes.c_int
+_INVALID_HANDLE_VALUE = _ffi.cast("HANDLE", -1)
-_GetStdHandle = _kernel32.GetStdHandle
-_GetStdHandle.argtypes = [ctypes.c_int]
-_GetStdHandle.restype = ctypes.c_int
-
-_GetModuleFileNameW = _kernel32.GetModuleFileNameW
-_GetModuleFileNameW.argtypes = [ctypes.c_int, ctypes.c_wchar_p, ctypes.c_uint]
-_GetModuleFileNameW.restype = ctypes.c_int
-
-class _STARTUPINFO(ctypes.Structure):
- _fields_ = [('cb', ctypes.c_int),
- ('lpReserved', ctypes.c_void_p),
- ('lpDesktop', ctypes.c_char_p),
- ('lpTitle', ctypes.c_char_p),
- ('dwX', ctypes.c_int),
- ('dwY', ctypes.c_int),
- ('dwXSize', ctypes.c_int),
- ('dwYSize', ctypes.c_int),
- ('dwXCountChars', ctypes.c_int),
- ('dwYCountChars', ctypes.c_int),
- ("dwFillAttribute", ctypes.c_int),
- ("dwFlags", ctypes.c_int),
- ("wShowWindow", ctypes.c_short),
- ("cbReserved2", ctypes.c_short),
- ("lpReserved2", ctypes.c_void_p),
- ("hStdInput", ctypes.c_int),
- ("hStdOutput", ctypes.c_int),
- ("hStdError", ctypes.c_int)
- ]
-
-class _PROCESS_INFORMATION(ctypes.Structure):
- _fields_ = [("hProcess", ctypes.c_int),
- ("hThread", ctypes.c_int),
- ("dwProcessID", ctypes.c_int),
- ("dwThreadID", ctypes.c_int)]
-
-_CreateProcess = _kernel32.CreateProcessW
-_CreateProcess.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p, ctypes.c_void_p,
- ctypes.c_int, ctypes.c_int, ctypes.c_wchar_p, ctypes.c_wchar_p,
- ctypes.POINTER(_STARTUPINFO), ctypes.POINTER(_PROCESS_INFORMATION)]
-_CreateProcess.restype = ctypes.c_int
-
-del ctypes
-
-# Now the _winapi module implementation
-
-from ctypes import c_int as _c_int, byref as _byref, WinError as _WinError
-
-class _handle:
- def __init__(self, handle):
- self.handle = handle
+class _handle(object):
+ def __init__(self, c_handle):
+ # 'c_handle' is a cffi cdata of type HANDLE, which is basically 'void *'
+ self.c_handle = c_handle
+ if int(self) != -1:
+ self.c_handle = _ffi.gc(self.c_handle, _kernel32.CloseHandle)
def __int__(self):
- return self.handle
+ return int(_ffi.cast("intptr_t", self.c_handle))
- def __del__(self):
- if self.handle is not None:
- _CloseHandle(self.handle)
+ def __repr__(self):
+ return '<_subprocess.handle %d at 0x%x>' % (int(self), id(self))
def Detach(self):
- handle, self.handle = self.handle, None
- return handle
+ h = int(self)
+ if h != -1:
+ c_handle = self.c_handle
+ self.c_handle = _INVALID_HANDLE_VALUE
+ _ffi.gc(c_handle, None)
+ return h
def Close(self):
- if self.handle not in (-1, None):
- _CloseHandle(self.handle)
- self.handle = None
+ if int(self) != -1:
+ c_handle = self.c_handle
+ self.c_handle = _INVALID_HANDLE_VALUE
+ _ffi.gc(c_handle, None)
+ _kernel32.CloseHandle(c_handle)
def CreatePipe(attributes, size):
- read = _c_int()
- write = _c_int()
+ handles = _ffi.new("HANDLE[2]")
- res = _CreatePipe(_byref(read), _byref(write), None, size)
+ res = _kernel32.CreatePipe(handles, handles + 1, _ffi.NULL, size)
if not res:
raise _WinError()
- return _handle(read.value), _handle(write.value)
+ return _handle(handles[0]), _handle(handles[1])
def GetCurrentProcess():
- return _handle(_GetCurrentProcess())
+ return _handle(_kernel32.GetCurrentProcess())
def DuplicateHandle(source_process, source, target_process, access, inherit, options=0):
- target = _c_int()
+ # CPython: the first three arguments are expected to be integers
+ target = _ffi.new("HANDLE[1]")
- res = _DuplicateHandle(int(source_process), int(source), int(target_process),
- _byref(target),
- access, inherit, options)
+ res = _kernel32.DuplicateHandle(
+ _ffi.cast("HANDLE", source_process),
+ _ffi.cast("HANDLE", source),
+ _ffi.cast("HANDLE", target_process),
+ target, access, inherit, options)
if not res:
raise _WinError()
- return _handle(target.value)
+ return _handle(target[0])
+
+def _z(input):
+ if input is None:
+ return _ffi.NULL
+ if isinstance(input, basestring):
+ return str(input)
+ raise TypeError("string/unicode/None expected, got %r" % (
+ type(input).__name__,))
def CreateProcess(name, command_line, process_attr, thread_attr,
inherit, flags, env, start_dir, startup_info):
- si = _STARTUPINFO()
+ si = _ffi.new("STARTUPINFO *")
if startup_info is not None:
si.dwFlags = startup_info.dwFlags
si.wShowWindow = startup_info.wShowWindow
+ # CPython: these three handles are expected to be _handle objects
if startup_info.hStdInput:
- si.hStdInput = int(startup_info.hStdInput)
+ si.hStdInput = startup_info.hStdInput.c_handle
if startup_info.hStdOutput:
- si.hStdOutput = int(startup_info.hStdOutput)
+ si.hStdOutput = startup_info.hStdOutput.c_handle
if startup_info.hStdError:
- si.hStdError = int(startup_info.hStdError)
+ si.hStdError = startup_info.hStdError.c_handle
- pi = _PROCESS_INFORMATION()
+ pi = _ffi.new("PROCESS_INFORMATION *")
flags |= CREATE_UNICODE_ENVIRONMENT
if env is not None:
@@ -164,47 +111,55 @@
envbuf += "%s=%s\0" % (k, v)
envbuf += '\0'
else:
- envbuf = None
+ envbuf = _ffi.NULL
- res = _CreateProcess(name, command_line, None, None, inherit, flags, envbuf,
- start_dir, _byref(si), _byref(pi))
+ res = _kernel32.CreateProcessA(_z(name), _z(command_line), _ffi.NULL,
+ _ffi.NULL, inherit, flags, envbuf,
+ _z(start_dir), si, pi)
if not res:
raise _WinError()
- return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessID, pi.dwThreadID
+ return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessId, pi.dwThreadId
def WaitForSingleObject(handle, milliseconds):
- res = _WaitForSingleObject(int(handle), milliseconds)
-
+ # CPython: the first argument is expected to be an integer.
+ res = _kernel32.WaitForSingleObject(_ffi.cast("HANDLE", handle),
+ milliseconds)
if res < 0:
raise _WinError()
return res
def GetExitCodeProcess(handle):
- code = _c_int()
+ # CPython: the first argument is expected to be an integer.
+ code = _ffi.new("DWORD[1]")
- res = _GetExitCodeProcess(int(handle), _byref(code))
+ res = _kernel32.GetExitCodeProcess(_ffi.cast("HANDLE", handle), code)
if not res:
raise _WinError()
- return code.value
+ return code[0]
def TerminateProcess(handle, exitcode):
- res = _TerminateProcess(int(handle), exitcode)
+ # CPython: the first argument is expected to be an integer.
+ # The second argument is silently wrapped in a UINT.
+ res = _kernel32.TerminateProcess(_ffi.cast("HANDLE", handle),
+ _ffi.cast("UINT", exitcode))
if not res:
raise _WinError()
def GetStdHandle(stdhandle):
- res = _GetStdHandle(stdhandle)
+ stdhandle = _ffi.cast("DWORD", stdhandle)
+ res = _kernel32.GetStdHandle(stdhandle)
if not res:
return None
else:
- return res
+ # note: returns integer, not handle object
+ return int(_ffi.cast("intptr_t", res))
def CloseHandle(handle):
res = _CloseHandle(handle)
diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py
--- a/lib_pypy/cffi/_pycparser/__init__.py
+++ b/lib_pypy/cffi/_pycparser/__init__.py
@@ -10,7 +10,6 @@
__all__ = ['c_lexer', 'c_parser', 'c_ast']
__version__ = '2.14'
-from subprocess import Popen, PIPE
from .c_parser import CParser
@@ -28,6 +27,7 @@
When successful, returns the preprocessed file's contents.
Errors from cpp will be printed out.
"""
+ from subprocess import Popen, PIPE
path_list = [cpp_path]
if isinstance(cpp_args, list):
path_list += cpp_args
diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info
--- a/lib_pypy/greenlet.egg-info
+++ b/lib_pypy/greenlet.egg-info
@@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: greenlet
-Version: 0.4.9
+Version: 0.4.10
Summary: Lightweight in-process concurrent programming
Home-page: https://github.com/python-greenlet/greenlet
Author: Ralf Schmitt (for CPython), PyPy team
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -2,7 +2,7 @@
import __pypy__
import _continuation
-__version__ = "0.4.9"
+__version__ = "0.4.10"
# ____________________________________________________________
# Exceptions
diff --git a/lib_pypy/msvcrt.py b/lib_pypy/msvcrt.py
--- a/lib_pypy/msvcrt.py
+++ b/lib_pypy/msvcrt.py
@@ -7,26 +7,39 @@
# XXX incomplete: implemented only functions needed by subprocess.py
# PAC: 2010/08 added MS locking for Whoosh
-import ctypes
+# 07/2016: rewrote in CFFI
+
+import sys
+if sys.platform != 'win32':
+ raise ImportError("The 'msvcrt' module is only available on Windows")
+
+import _rawffi
+from _pypy_winbase_cffi import ffi as _ffi
+_lib = _ffi.dlopen(_rawffi.get_libc().name)
+
import errno
-from ctypes_support import standard_c_lib as _c
-from ctypes_support import get_errno
-
-try:
- open_osfhandle = _c._open_osfhandle
-except AttributeError: # we are not on windows
- raise ImportError
try: from __pypy__ import builtinify, validate_fd
except ImportError: builtinify = validate_fd = lambda f: f
-open_osfhandle.argtypes = [ctypes.c_int, ctypes.c_int]
-open_osfhandle.restype = ctypes.c_int
+def _ioerr():
+ e = _ffi.errno
+ raise IOError(e, errno.errorcode[e])
-_get_osfhandle = _c._get_osfhandle
-_get_osfhandle.argtypes = [ctypes.c_int]
-_get_osfhandle.restype = ctypes.c_int
+
+@builtinify
+def open_osfhandle(fd, flags):
+ """"open_osfhandle(handle, flags) -> file descriptor
+
+ Create a C runtime file descriptor from the file handle handle. The
+ flags parameter should be a bitwise OR of os.O_APPEND, os.O_RDONLY,
+ and os.O_TEXT. The returned file descriptor may be used as a parameter
+ to os.fdopen() to create a file object."""
+ fd = _lib._open_osfhandle(fd, flags)
+ if fd == -1:
+ _ioerr()
+ return fd
@builtinify
def get_osfhandle(fd):
@@ -38,62 +51,74 @@
validate_fd(fd)
except OSError as e:
raise IOError(*e.args)
- return _get_osfhandle(fd)
+ result = _lib._get_osfhandle(fd)
+ if result == -1:
+ _ioerr()
+ return result
-setmode = _c._setmode
-setmode.argtypes = [ctypes.c_int, ctypes.c_int]
-setmode.restype = ctypes.c_int
+@builtinify
+def setmode(fd, flags):
+ """setmode(fd, mode) -> Previous mode
+
+ Set the line-end translation mode for the file descriptor fd. To set
+ it to text mode, flags should be os.O_TEXT; for binary, it should be
+ os.O_BINARY."""
+ flags = _lib._setmode(fd, flags)
+ if flags == -1:
+ _ioerr()
+ return flags
LK_UNLCK, LK_LOCK, LK_NBLCK, LK_RLCK, LK_NBRLCK = range(5)
-_locking = _c._locking
-_locking.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_locking.restype = ctypes.c_int
-
@builtinify
def locking(fd, mode, nbytes):
- '''lock or unlock a number of bytes in a file.'''
- rv = _locking(fd, mode, nbytes)
+    """locking(fd, mode, nbytes) -> None
+
+ Lock part of a file based on file descriptor fd from the C runtime.
+ Raises IOError on failure. The locked region of the file extends from
+ the current file position for nbytes bytes, and may continue beyond
+ the end of the file. mode must be one of the LK_* constants listed
+ below. Multiple regions in a file may be locked at the same time, but
+ may not overlap. Adjacent regions are not merged; they must be unlocked
+ individually."""
+ rv = _lib._locking(fd, mode, nbytes)
if rv != 0:
- e = get_errno()
- raise IOError(e, errno.errorcode[e])
+ _ioerr()
# Console I/O routines
-kbhit = _c._kbhit
-kbhit.argtypes = []
-kbhit.restype = ctypes.c_int
+kbhit = _lib._kbhit
-getch = _c._getch
-getch.argtypes = []
-getch.restype = ctypes.c_char
+@builtinify
+def getch():
+ return chr(_lib._getch())
-getwch = _c._getwch
-getwch.argtypes = []
-getwch.restype = ctypes.c_wchar
+@builtinify
+def getwch():
+ return unichr(_lib._getwch())
-getche = _c._getche
-getche.argtypes = []
-getche.restype = ctypes.c_char
+@builtinify
+def getche():
+ return chr(_lib._getche())
-getwche = _c._getwche
-getwche.argtypes = []
-getwche.restype = ctypes.c_wchar
+@builtinify
+def getwche():
+ return unichr(_lib._getwche())
-putch = _c._putch
-putch.argtypes = [ctypes.c_char]
-putch.restype = None
+@builtinify
+def putch(ch):
+ _lib._putch(ord(ch))
-putwch = _c._putwch
-putwch.argtypes = [ctypes.c_wchar]
-putwch.restype = None
+@builtinify
+def putwch(ch):
+ _lib._putwch(ord(ch))
-ungetch = _c._ungetch
-ungetch.argtypes = [ctypes.c_char]
-ungetch.restype = None
+@builtinify
+def ungetch(ch):
+ if _lib._ungetch(ord(ch)) == -1: # EOF
+ _ioerr()
-ungetwch = _c._ungetwch
-ungetwch.argtypes = [ctypes.c_wchar]
-ungetwch.restype = None
-
-del ctypes
+@builtinify
+def ungetwch(ch):
+ if _lib._ungetwch(ord(ch)) == -1: # EOF
+ _ioerr()
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -40,7 +40,7 @@
"binascii", "_multiprocessing", '_warnings', "_collections",
"_multibytecodec", "_continuation", "_cffi_backend",
"_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy"
- "faulthandler",
+ "faulthandler", "_jitlog",
])
from rpython.jit.backend import detect_cpu
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -94,6 +94,20 @@
def pytest_pycollect_makemodule(path, parent):
return PyPyModule(path, parent)
+def is_applevel(item):
+ from pypy.tool.pytest.apptest import AppTestFunction
+ return isinstance(item, AppTestFunction)
+
+def pytest_collection_modifyitems(config, items):
+ if config.option.runappdirect:
+ return
+ for item in items:
+ if isinstance(item, py.test.Function):
+ if is_applevel(item):
+ item.add_marker('applevel')
+ else:
+ item.add_marker('interplevel')
+
class PyPyModule(py.test.collect.Module):
""" we take care of collecting classes both at app level
and at interp-level (because we need to stick a space
@@ -128,9 +142,6 @@
if name.startswith('AppTest'):
from pypy.tool.pytest.apptest import AppClassCollector
return AppClassCollector(name, parent=self)
- else:
- from pypy.tool.pytest.inttest import IntClassCollector
- return IntClassCollector(name, parent=self)
elif hasattr(obj, 'func_code') and self.funcnamefilter(name):
if name.startswith('app_test_'):
@@ -138,11 +149,7 @@
"generator app level functions? you must be joking"
from pypy.tool.pytest.apptest import AppTestFunction
return AppTestFunction(name, parent=self)
- elif obj.func_code.co_flags & 32: # generator function
- return pytest.Generator(name, parent=self)
- else:
- from pypy.tool.pytest.inttest import IntTestFunction
- return IntTestFunction(name, parent=self)
+ return super(PyPyModule, self).makeitem(name, obj)
def skip_on_missing_buildoption(**ropts):
__tracebackhide__ = True
@@ -171,28 +178,19 @@
def pytest_runtest_setup(__multicall__, item):
if isinstance(item, py.test.collect.Function):
- appclass = item.getparent(PyPyClassCollector)
+ appclass = item.getparent(py.test.Class)
if appclass is not None:
# Make cls.space and cls.runappdirect available in tests.
spaceconfig = getattr(appclass.obj, 'spaceconfig', None)
if spaceconfig is not None:
from pypy.tool.pytest.objspace import gettestobjspace
appclass.obj.space = gettestobjspace(**spaceconfig)
+ else:
+ appclass.obj.space = LazyObjSpaceGetter()
appclass.obj.runappdirect = option.runappdirect
__multicall__.execute()
-class PyPyClassCollector(py.test.collect.Class):
- # All pypy Test classes have a "space" member.
- def setup(self):
- cls = self.obj
- if not hasattr(cls, 'spaceconfig'):
- cls.space = LazyObjSpaceGetter()
- else:
- assert hasattr(cls, 'space') # set by pytest_runtest_setup
- super(PyPyClassCollector, self).setup()
-
-
def pytest_ignore_collect(path):
return path.check(link=1)
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -104,27 +104,24 @@
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
- tk-dev libgc-dev liblzma-dev
-
-For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
+ tk-dev libgc-dev \
+ liblzma-dev # For lzma on PyPy3.
On Fedora::
dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
- gdbm-devel
-
-For the optional lzma module on PyPy3 you will also need ``xz-devel``.
+ gdbm-devel \
+ xz-devel # For lzma on PyPy3.
On SLES11::
zypper install gcc make python-devel pkg-config \
zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \
- libexpat-devel libffi-devel python-curses
+ libexpat-devel libffi-devel python-curses \
+ xz-devel # For lzma on PyPy3.
(XXX plus the SLES11 version of libgdbm-dev and tk-dev)
-For the optional lzma module on PyPy3 you will also need ``xz-devel``.
-
On Mac OS X, most of these build-time dependencies are installed alongside
the Developer Tools. However, note that in order for the installation to
find them you may need to run::
diff --git a/pypy/doc/config/commandline.txt b/pypy/doc/config/commandline.txt
--- a/pypy/doc/config/commandline.txt
+++ b/pypy/doc/config/commandline.txt
@@ -9,7 +9,7 @@
PyPy Python interpreter options
-------------------------------
-The following options can be used after ``translate.py
+The following options can be used after ``rpython
targetpypystandalone`` or as options to ``py.py``.
.. GENERATE: objspace
@@ -22,7 +22,7 @@
General translation options
---------------------------
-The following are options of ``translate.py``. They must be
+The following are options of ``bin/rpython``. They must be
given before the ``targetxxx`` on the command line.
* `--opt -O:`__ set the optimization level `[0, 1, size, mem, 2, 3]`
diff --git a/pypy/doc/config/index.rst b/pypy/doc/config/index.rst
--- a/pypy/doc/config/index.rst
+++ b/pypy/doc/config/index.rst
@@ -15,12 +15,12 @@
./py.py <`objspace options`_>
-and the ``translate.py`` translation entry
+and the ``rpython/bin/rpython`` translation entry
point which takes arguments of this form:
.. parsed-literal::
- ./translate.py <`translation options`_>
+ ./rpython/bin/rpython <`translation options`_>
For the common case of ```` being ``targetpypystandalone.py``,
you can then pass the `object space options`_ after
@@ -28,7 +28,7 @@
.. parsed-literal::
- ./translate.py <`translation options`_> targetpypystandalone.py <`objspace options`_>
+ ./rpython/bin/rpython <`translation options`_> targetpypystandalone.py <`objspace options`_>
There is an `overview`_ of all command line arguments that can be
passed in either position.
diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst
--- a/pypy/doc/config/opt.rst
+++ b/pypy/doc/config/opt.rst
@@ -4,8 +4,8 @@
This meta-option selects a default set of optimization
settings to use during a translation. Usage::
- translate.py --opt=#
- translate.py -O#
+ bin/rpython --opt=#
+ bin/rpython -O#
where ``#`` is the desired optimization level. The valid choices are:
diff --git a/pypy/doc/config/translation.dont_write_c_files.txt b/pypy/doc/config/translation.dont_write_c_files.txt
--- a/pypy/doc/config/translation.dont_write_c_files.txt
+++ b/pypy/doc/config/translation.dont_write_c_files.txt
@@ -1,4 +1,4 @@
write the generated C files to ``/dev/null`` instead of to the disk. Useful if
-you want to use translate.py as a benchmark and don't want to access the disk.
+you want to use translation as a benchmark and don't want to access the disk.
.. _`translation documentation`: ../translation.html
diff --git a/pypy/doc/config/translation.fork_before.txt b/pypy/doc/config/translation.fork_before.txt
--- a/pypy/doc/config/translation.fork_before.txt
+++ b/pypy/doc/config/translation.fork_before.txt
@@ -1,4 +1,4 @@
This is an option mostly useful when working on the PyPy toolchain. If you use
-it, translate.py will fork before the specified phase. If the translation
+it, translation will fork before the specified phase. If the translation
crashes after that fork, you can fix the bug in the toolchain, and continue
translation at the fork-point.
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -122,7 +122,7 @@
$ hg up reflex-support # optional
# This example shows python, but using pypy-c is faster and uses less memory
- $ python rpython/translator/goal/translate.py --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy
+ $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy
This will build a ``pypy-c`` that includes the cppyy module, and through that,
Reflex support.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -315,13 +315,28 @@
- ``complex``
+ - ``str`` (empty or single-character strings only)
+
+ - ``unicode`` (empty or single-character strings only)
+
+ - ``tuple`` (empty tuples only)
+
+ - ``frozenset`` (empty frozenset only)
+
This change requires some changes to ``id`` as well. ``id`` fulfills the
following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the
above types will return a value that is computed from the argument, and can
thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long).
-Notably missing from the list above are ``str`` and ``unicode``. If your
-code relies on comparing strings with ``is``, then it might break in PyPy.
+Note that strings of length 2 or greater can be equal without being
+identical. Similarly, ``x is (2,)`` is not necessarily true even if
+``x`` contains a tuple and ``x == (2,)``. The uniqueness rules apply
+only to the particular cases described above. The ``str``, ``unicode``,
+``tuple`` and ``frozenset`` rules were added in PyPy 5.4; before that, a
+test like ``if x is "?"`` or ``if x is ()`` could fail even if ``x`` was
+equal to ``"?"`` or ``()``. The new behavior added in PyPy 5.4 is
+closer to CPython's, which caches precisely the empty tuple/frozenset,
+and (generally but not always) the strings and unicodes of length <= 1.
Note that for floats there "``is``" only one object per "bit pattern"
of the float. So ``float('nan') is float('nan')`` is true on PyPy,
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -335,3 +335,60 @@
This will disable SELinux's protection and allow PyPy to configure correctly.
Be sure to enable it again if you need it!
+
+
+How should I report a bug?
+--------------------------
+
+Our bug tracker is here: https://bitbucket.org/pypy/pypy/issues/
+
+Missing features or incompatibilities with CPython are considered
+bugs, and they are welcome. (See also our list of `known
+incompatibilities`__.)
+
+.. __: http://pypy.org/compat.html
+
+For bugs of the kind "I'm getting a PyPy crash or a strange
+exception", please note that: **We can't do anything without
+reproducing the bug ourselves**. We cannot do anything with
+tracebacks from gdb, or core dumps. This is not only because the
+standard PyPy is compiled without debug symbols. The real reason is
+that a C-level traceback is usually of no help at all in PyPy.
+Debugging PyPy can be annoying.
+
+In more details:
+
+* First, please give the exact PyPy version, and the OS.
+
+* It might help focus our search if we know if the bug can be
+ reproduced on a "``pypy --jit off``" or not. If "``pypy --jit
+ off``" always works, then the problem might be in the JIT.
+ Otherwise, we know we can ignore that part.
+
+* If you got the bug using only Open Source components, please give a
+ step-by-step guide that we can follow to reproduce the problem
+ ourselves. Don't assume we know anything about any program other
+ than PyPy. We would like a guide that we can follow point by point
+ (without guessing or having to figure things out)
+ on a machine similar to yours, starting from a bare PyPy, until we
+ see the same problem. (If you can, you can try to reduce the number
+ of steps and the time it needs to run, but that is not mandatory.)
+
+* If the bug involves Closed Source components, or just too many Open
+ Source components to install them all ourselves, then maybe you can
+ give us some temporary ssh access to a machine where the bug can be
+ reproduced. Or, maybe we can download a VirtualBox or VMWare
+ virtual machine where the problem occurs.
+
+* If giving us access would require us to use tools other than ssh,
+  make appointments, or sign an NDA, then we can consider a commercial
+ support contract for a small sum of money.
+
+* If even that is not possible for you, then sorry, we can't help.
+
+Of course, you can try to debug the problem yourself, and we can help
+you get started if you ask on the #pypy IRC channel, but be prepared:
+debugging an annoying PyPy problem usually involves quite a lot of gdb
+in auto-generated C code, and at least some knowledge about the
+various components involved, from PyPy's own RPython source code to
+the GC and possibly the JIT.
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-pypy2.7-v5.3.1.rst
release-pypy2.7-v5.3.0.rst
release-5.1.1.rst
release-5.1.0.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
.. toctree::
whatsnew-head.rst
+ whatsnew-pypy2-5.3.1.rst
whatsnew-pypy2-5.3.0.rst
whatsnew-5.1.0.rst
whatsnew-5.0.0.rst
diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst
--- a/pypy/doc/install.rst
+++ b/pypy/doc/install.rst
@@ -39,17 +39,16 @@
library.
If you want to install 3rd party libraries, the most convenient way is
-to install pip_ (unless you want to install virtualenv as explained
-below; then you can directly use pip inside virtualenvs):
+to install pip_ using ensurepip_ (unless you want to install virtualenv as
+explained below; then you can directly use pip inside virtualenvs):
.. code-block:: console
- $ curl -O https://bootstrap.pypa.io/get-pip.py
- $ ./pypy-2.1/bin/pypy get-pip.py
- $ ./pypy-2.1/bin/pip install pygments # for example
+ $ ./pypy-xxx/bin/pypy -m ensurepip
+ $ ./pypy-xxx/bin/pip install pygments # for example
-Third party libraries will be installed in ``pypy-2.1/site-packages``, and
-the scripts in ``pypy-2.1/bin``.
+Third party libraries will be installed in ``pypy-xxx/site-packages``, and
+the scripts in ``pypy-xxx/bin``.
Installing using virtualenv
@@ -61,7 +60,7 @@
checkout::
# from a tarball
- $ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env
+ $ virtualenv -p /opt/pypy-xxx/bin/pypy my-pypy-env
# from the mercurial checkout
$ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env
@@ -69,7 +68,7 @@
Note that bin/python is now a symlink to bin/pypy.
.. _pip: http://pypi.python.org/pypi/pip
-
+.. _ensurepip: https://docs.python.org/2.7/library/ensurepip.html
Building PyPy yourself
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pypy/doc/release-pypy2.7-v5.3.1.rst b/pypy/doc/release-pypy2.7-v5.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-pypy2.7-v5.3.1.rst
@@ -0,0 +1,41 @@
+==========
+PyPy 5.3.1
+==========
+
+We have released a bugfix for PyPy2.7-v5.3.0, released last week,
+due to issues_ reported by users.
+
+Thanks to those who reported the issues.
+
+.. _issues: http://doc.pypy.org/en/latest/whatsnew-pypy2-5.3.1.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,13 @@
.. this is a revision shortly after release-pypy2.7-v5.3
.. startrev: 873218a739f1
+.. 418b05f95db5
+Improve CPython compatibility for ``is``. Now code like ``if x is ():``
+works the same way as it does on CPython. See http://pypy.readthedocs.io/en/latest/cpython_differences.html#object-identity-of-primitive-values-is-and-id .
+
+.. pull request #455
+Add sys.{get,set}dlopenflags, for cpyext extensions.
+
.. branch: fix-gen-dfa
Resolves an issue with the generator script to build the dfa for Python syntax.
@@ -19,3 +26,82 @@
.. branch: s390x-5.3-catchup
Implement the backend related changes for s390x.
+
+.. branch: incminimark-ll_assert
+.. branch: vmprof-openbsd
+
+.. branch: testing-cleanup
+
+Simplify handling of interp-level tests and make it more forward-
+compatible.
+
+.. branch: pyfile-tell
+Sync w_file with the c-level FILE* before returning FILE* in PyFile_AsFile
+
+.. branch: rw-PyString_AS_STRING
+Allow rw access to the char* returned from PyString_AS_STRING, also refactor
+PyStringObject to look like cpython's and allow subclassing PyString_Type and
+PyUnicode_Type
+
+.. branch: save_socket_errno
+
+Bug fix: if ``socket.socket()`` failed, the ``socket.error`` did not show
+the errno of the failing system call, but instead some random previous
+errno.
+
+.. branch: PyTuple_Type-subclass
+
+Refactor PyTupleObject to look like cpython's and allow subclassing
+PyTuple_Type
+
+.. branch: call-via-pyobj
+
+Use offsets from PyTypeObject to find actual c function to call rather than
+fixed functions, allows function override after PyType_Ready is called
+
+.. branch: issue2335
+
+Avoid exhausting the stack in the JIT due to successive guard
+failures in the same Python function ending up as successive levels of
+RPython functions, while at app-level the traceback is very short
+
+.. branch: use-madv-free
+
+Try harder to return memory to the OS. See e.g. issue #2336. Note that it does
+not show up as a reduction of the VIRT column in ``top``, and the RES
+column might also not show the reduction, particularly on Linux >= 4.5 or
+on OS/X: it uses MADV_FREE, which only marks the pages as returnable to
+the OS if the memory is low.
+
+.. branch: cpyext-slotdefs2
+
+Fill in more slots when creating a PyTypeObject from a W_TypeObject
+More slots are still TBD, like tp_print and richcmp
+
+.. branch: json-surrogates
+
+Align json module decode with the cpython's impl, fixes issue 2345
+
+.. branch: issue2343
+
+Copy CPython's logic more closely for handling of ``__instancecheck__()``
+and ``__subclasscheck__()``. Fixes issue 2343.
+
+.. branch: msvcrt-cffi
+
+Rewrite the Win32 dependencies of 'subprocess' to use cffi instead
+of ctypes. This avoids importing ctypes in many small programs and
+scripts, which in turn avoids enabling threads (because ctypes
+creates callbacks at import time, and callbacks need threads).
+
+.. branch: new-jit-log
+
+The new logging facility that integrates with and adds features to vmprof.com.
+
+.. branch: jitlog-32bit
+
+Resolve issues to use the new logging facility on a 32bit system
+
+.. branch: ep2016sprint
+
+Trying harder to make hash(-1) return -2, like it does on CPython
diff --git a/pypy/doc/whatsnew-pypy2-5.3.1.rst b/pypy/doc/whatsnew-pypy2-5.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-pypy2-5.3.1.rst
@@ -0,0 +1,15 @@
+===========================
+What's new in PyPy2.7 5.3.1
+===========================
+
+.. this is a revision shortly after release-pypy2.7-v5.3.0
+.. startrev: f4d726d1a010
+
+
+A bug-fix release, merging these changes:
+
+ * Add include guards to pymem.h, fixes issue #2321
+
+ * Make vmprof build on OpenBSD, from pull request #456
+
+ * Fix ``bytearray('').replace('a', 'ab')``, issue #2324
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -419,13 +419,16 @@
target_depth -= 2
elif (jump_op == ops.SETUP_FINALLY or
jump_op == ops.SETUP_EXCEPT or
- jump_op == ops.SETUP_WITH):
+ jump_op == ops.SETUP_WITH or
+ jump_op == ops.SETUP_ASYNC_WITH):
if jump_op == ops.SETUP_FINALLY:
target_depth += 4
elif jump_op == ops.SETUP_EXCEPT:
target_depth += 4
elif jump_op == ops.SETUP_WITH:
target_depth += 3
+ elif jump_op == ops.SETUP_ASYNC_WITH:
+ target_depth += 3
if target_depth > self._max_depth:
self._max_depth = target_depth
elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or
@@ -640,6 +643,13 @@
ops.LOAD_DEREF: 1,
ops.STORE_DEREF: -1,
ops.DELETE_DEREF: 0,
+
+ ops.GET_AWAITABLE: 0,
+ ops.SETUP_ASYNC_WITH: 2,
+ ops.BEFORE_ASYNC_WITH: -1,
+ ops.GET_AITER: 0,
+ ops.GET_ANEXT: 1,
+ ops.GET_YIELD_FROM_ITER: 0,
ops.LOAD_CONST: 1,
@@ -658,6 +668,8 @@
# TODO
ops.BUILD_LIST_FROM_ARG: 1,
+ # TODO
+ ops.LOAD_CLASSDEREF: 1,
}
diff --git a/pypy/interpreter/astcompiler/assemble.py.orig b/pypy/interpreter/astcompiler/assemble.py.orig
deleted file mode 100644
--- a/pypy/interpreter/astcompiler/assemble.py.orig
+++ /dev/null
@@ -1,765 +0,0 @@
-"""Python control flow graph generation and bytecode assembly."""
-
-import os
-from rpython.rlib import rfloat
-from rpython.rlib.objectmodel import specialize, we_are_translated
-
-from pypy.interpreter.astcompiler import ast, consts, misc, symtable
-from pypy.interpreter.error import OperationError
-from pypy.interpreter.pycode import PyCode
-from pypy.tool import stdlib_opcode as ops
-
-
-class StackDepthComputationError(Exception):
- pass
-
-
-class Instruction(object):
- """Represents a single opcode."""
-
- def __init__(self, opcode, arg=0):
- self.opcode = opcode
- self.arg = arg
- self.lineno = 0
- self.has_jump = False
-
- def size(self):
- """Return the size of bytes of this instruction when it is
- encoded.
- """
- if self.opcode >= ops.HAVE_ARGUMENT:
- return (6 if self.arg > 0xFFFF else 3)
- return 1
-
- def jump_to(self, target, absolute=False):
- """Indicate the target this jump instruction.
-
- The opcode must be a JUMP opcode.
- """
- self.jump = (target, absolute)
- self.has_jump = True
-
- def __repr__(self):
- data = [ops.opname[self.opcode]]
- template = "<%s"
- if self.opcode >= ops.HAVE_ARGUMENT:
- data.append(self.arg)
- template += " %i"
- if self.has_jump:
- data.append(self.jump[0])
- template += " %s"
- template += ">"
- return template % tuple(data)
-
-
-class Block(object):
- """A basic control flow block.
-
- It has one entry point and several possible exit points. Its
- instructions may be jumps to other blocks, or if control flow
- reaches the end of the block, it continues to next_block.
- """
-
- marked = False
- have_return = False
- auto_inserted_return = False
-
- def __init__(self):
- self.instructions = []
- self.next_block = None
-
- def _post_order_see(self, stack, nextblock):
- if nextblock.marked == 0:
- nextblock.marked = 1
- stack.append(nextblock)
-
- def post_order(self):
- """Return this block and its children in post order. This means
- that the graph of blocks is first cleaned up to ignore
- back-edges, thus turning it into a DAG. Then the DAG is
- linearized. For example:
-
- A --> B -\ => [A, D, B, C]
- \-> D ---> C
- """
- resultblocks = []
- stack = [self]
- self.marked = 1
- while stack:
- current = stack[-1]
- if current.marked == 1:
- current.marked = 2
- if current.next_block is not None:
- self._post_order_see(stack, current.next_block)
- else:
- i = current.marked - 2
- assert i >= 0
- while i < len(current.instructions):
- instr = current.instructions[i]
- i += 1
- if instr.has_jump:
- current.marked = i + 2
- self._post_order_see(stack, instr.jump[0])
- break
- else:
- resultblocks.append(current)
- stack.pop()
- resultblocks.reverse()
- return resultblocks
-
- def code_size(self):
- """Return the encoded size of all the instructions in this
- block.
- """
- i = 0
- for instr in self.instructions:
- i += instr.size()
- return i
-
- def get_code(self):
- """Encode the instructions in this block into bytecode."""
- code = []
- for instr in self.instructions:
- opcode = instr.opcode
- if opcode >= ops.HAVE_ARGUMENT:
- arg = instr.arg
- if instr.arg > 0xFFFF:
- ext = arg >> 16
- code.append(chr(ops.EXTENDED_ARG))
- code.append(chr(ext & 0xFF))
- code.append(chr(ext >> 8))
- arg &= 0xFFFF
- code.append(chr(opcode))
- code.append(chr(arg & 0xFF))
- code.append(chr(arg >> 8))
- else:
- code.append(chr(opcode))
- return ''.join(code)
-
-
-def _make_index_dict_filter(syms, flag):
- i = 0
- result = {}
- for name, scope in syms.iteritems():
- if scope == flag:
- result[name] = i
- i += 1
- return result
-
-
-@specialize.argtype(0)
-def _iter_to_dict(iterable, offset=0):
- result = {}
- index = offset
- for item in iterable:
- result[item] = index
- index += 1
- return result
-
-
-class PythonCodeMaker(ast.ASTVisitor):
- """Knows how to assemble a PyCode object."""
-
- def __init__(self, space, name, first_lineno, scope, compile_info):
- self.space = space
- self.name = name
- self.first_lineno = first_lineno
- self.compile_info = compile_info
- self.first_block = self.new_block()
- self.use_block(self.first_block)
- self.names = {}
- self.var_names = _iter_to_dict(scope.varnames)
- self.cell_vars = _make_index_dict_filter(scope.symbols,
- symtable.SCOPE_CELL)
- self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars))
- self.w_consts = space.newdict()
- self.argcount = 0
- self.kwonlyargcount = 0
- self.lineno_set = False
- self.lineno = 0
- self.add_none_to_final_return = True
-
- def new_block(self):
- return Block()
-
- def use_block(self, block):
- """Start emitting bytecode into block."""
- self.current_block = block
- self.instrs = block.instructions
-
- def use_next_block(self, block=None):
- """Set this block as the next_block for the last and use it."""
- if block is None:
- block = self.new_block()
- self.current_block.next_block = block
- self.use_block(block)
- return block
-
- def is_dead_code(self):
- """Return False if any code can be meaningfully added to the
- current block, or True if it would be dead code."""
- # currently only True after a RETURN_VALUE.
- return self.current_block.have_return
-
- def emit_op(self, op):
- """Emit an opcode without an argument."""
- instr = Instruction(op)
- if not self.lineno_set:
- instr.lineno = self.lineno
- self.lineno_set = True
- if not self.is_dead_code():
- self.instrs.append(instr)
- if op == ops.RETURN_VALUE:
- self.current_block.have_return = True
- return instr
-
- def emit_op_arg(self, op, arg):
- """Emit an opcode with an integer argument."""
- instr = Instruction(op, arg)
- if not self.lineno_set:
- instr.lineno = self.lineno
- self.lineno_set = True
- if not self.is_dead_code():
- self.instrs.append(instr)
-
- def emit_op_name(self, op, container, name):
- """Emit an opcode referencing a name."""
- self.emit_op_arg(op, self.add_name(container, name))
-
- def emit_jump(self, op, block_to, absolute=False):
- """Emit a jump opcode to another block."""
- self.emit_op(op).jump_to(block_to, absolute)
-
- def add_name(self, container, name):
- """Get the index of a name in container."""
- name = self.scope.mangle(name)
- try:
- index = container[name]
- except KeyError:
- index = len(container)
- container[name] = index
- return index
-
- def add_const(self, obj):
- """Add a W_Root to the constant array and return its location."""
- space = self.space
- # To avoid confusing equal but separate types, we hash store the type
- # of the constant in the dictionary. Moreover, we have to keep the
- # difference between -0.0 and 0.0 floats, and this recursively in
- # tuples.
- w_key = self._make_key(obj)
-
- w_len = space.finditem(self.w_consts, w_key)
- if w_len is None:
- w_len = space.len(self.w_consts)
- space.setitem(self.w_consts, w_key, w_len)
- if space.int_w(w_len) == 0:
- self.scope.doc_removable = False
- return space.int_w(w_len)
-
- def _make_key(self, obj):
- # see the tests 'test_zeros_not_mixed*' in ../test/test_compiler.py
- space = self.space
- w_type = space.type(obj)
- if space.is_w(w_type, space.w_float):
- val = space.float_w(obj)
- if val == 0.0 and rfloat.copysign(1., val) < 0:
- w_key = space.newtuple([obj, space.w_float, space.w_None])
- else:
- w_key = space.newtuple([obj, space.w_float])
- elif space.is_w(w_type, space.w_complex):
- w_real = space.getattr(obj, space.wrap("real"))
- w_imag = space.getattr(obj, space.wrap("imag"))
- real = space.float_w(w_real)
- imag = space.float_w(w_imag)
- real_negzero = (real == 0.0 and
- rfloat.copysign(1., real) < 0)
- imag_negzero = (imag == 0.0 and
- rfloat.copysign(1., imag) < 0)
- if real_negzero and imag_negzero:
- tup = [obj, space.w_complex, space.w_None, space.w_None,
- space.w_None]
- elif imag_negzero:
- tup = [obj, space.w_complex, space.w_None, space.w_None]
- elif real_negzero:
- tup = [obj, space.w_complex, space.w_None]
- else:
- tup = [obj, space.w_complex]
- w_key = space.newtuple(tup)
- elif space.is_w(w_type, space.w_tuple):
- result_w = [obj, w_type]
- for w_item in space.fixedview(obj):
- result_w.append(self._make_key(w_item))
- w_key = space.newtuple(result_w[:])
- elif isinstance(obj, PyCode):
- w_key = space.newtuple([obj, w_type, space.id(obj)])
- else:
- w_key = space.newtuple([obj, w_type])
- return w_key
-
- def load_const(self, obj):
- index = self.add_const(obj)
- self.emit_op_arg(ops.LOAD_CONST, index)
-
- def update_position(self, lineno, force=False):
- """Possibly change the lineno for the next instructions."""
- if force or lineno > self.lineno:
- self.lineno = lineno
- self.lineno_set = False
-
- def _resolve_block_targets(self, blocks):
- """Compute the arguments of jump instructions."""
- last_extended_arg_count = 0
- # The reason for this loop is extended jumps. EXTENDED_ARG
- # extends the bytecode size, so it might invalidate the offsets
- # we've already given. Thus we have to loop until the number of
- # extended args is stable. Any extended jump at all is
- # extremely rare, so performance is not too concerning.
- while True:
- extended_arg_count = 0
- offset = 0
- force_redo = False
- # Calculate the code offset of each block.
- for block in blocks:
- block.offset = offset
- offset += block.code_size()
- for block in blocks:
- offset = block.offset
- for instr in block.instructions:
- offset += instr.size()
- if instr.has_jump:
- target, absolute = instr.jump
- op = instr.opcode
- # Optimize an unconditional jump going to another
- # unconditional jump.
- if op == ops.JUMP_ABSOLUTE or op == ops.JUMP_FORWARD:
- if target.instructions:
- target_op = target.instructions[0].opcode
- if target_op == ops.JUMP_ABSOLUTE:
- target = target.instructions[0].jump[0]
- instr.opcode = ops.JUMP_ABSOLUTE
- absolute = True
- elif target_op == ops.RETURN_VALUE:
- # Replace JUMP_* to a RETURN into
- # just a RETURN
- instr.opcode = ops.RETURN_VALUE
- instr.arg = 0
- instr.has_jump = False
- # The size of the code changed,
- # we have to trigger another pass
- force_redo = True
- continue
- if absolute:
- jump_arg = target.offset
- else:
- jump_arg = target.offset - offset
- instr.arg = jump_arg
- if jump_arg > 0xFFFF:
- extended_arg_count += 1
- if (extended_arg_count == last_extended_arg_count and
- not force_redo):
- break
- else:
- last_extended_arg_count = extended_arg_count
-
- def _build_consts_array(self):
- """Turn the applevel constants dictionary into a list."""
- w_consts = self.w_consts
- space = self.space
- consts_w = [space.w_None] * space.len_w(w_consts)
- w_iter = space.iter(w_consts)
- first = space.wrap(0)
- while True:
- try:
- w_key = space.next(w_iter)
- except OperationError as e:
- if not e.match(space, space.w_StopIteration):
- raise
- break
- w_index = space.getitem(w_consts, w_key)
- w_constant = space.getitem(w_key, first)
- w_constant = misc.intern_if_common_string(space, w_constant)
- consts_w[space.int_w(w_index)] = w_constant
- return consts_w
-
- def _get_code_flags(self):
- """Get an extra flags that should be attached to the code object."""
- raise NotImplementedError
-
- def _stacksize(self, blocks):
- """Compute co_stacksize."""
- for block in blocks:
- block.initial_depth = 0
- # Assumes that it is sufficient to walk the blocks in 'post-order'.
- # This means we ignore all back-edges, but apart from that, we only
- # look into a block when all the previous blocks have been done.
- self._max_depth = 0
- for block in blocks:
- depth = self._do_stack_depth_walk(block)
- if block.auto_inserted_return and depth != 0:
- os.write(2, "StackDepthComputationError in %s at %s:%s\n" % (
- self.compile_info.filename, self.name, self.first_lineno))
- raise StackDepthComputationError # fatal error
- return self._max_depth
-
- def _next_stack_depth_walk(self, nextblock, depth):
- if depth > nextblock.initial_depth:
- nextblock.initial_depth = depth
-
- def _do_stack_depth_walk(self, block):
- depth = block.initial_depth
- for instr in block.instructions:
- depth += _opcode_stack_effect(instr.opcode, instr.arg)
- if depth >= self._max_depth:
- self._max_depth = depth
- jump_op = instr.opcode
- if instr.has_jump:
- target_depth = depth
- if jump_op == ops.FOR_ITER:
- target_depth -= 2
- elif (jump_op == ops.SETUP_FINALLY or
- jump_op == ops.SETUP_EXCEPT or
- jump_op == ops.SETUP_WITH):
- if jump_op == ops.SETUP_FINALLY:
- target_depth += 4
- elif jump_op == ops.SETUP_EXCEPT:
- target_depth += 4
- elif jump_op == ops.SETUP_WITH:
- target_depth += 3
- if target_depth > self._max_depth:
- self._max_depth = target_depth
- elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or
- jump_op == ops.JUMP_IF_FALSE_OR_POP):
- depth -= 1
- self._next_stack_depth_walk(instr.jump[0], target_depth)
- if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
- # Nothing more can occur.
- break
- elif jump_op == ops.RETURN_VALUE or jump_op == ops.RAISE_VARARGS:
- # Nothing more can occur.
- break
- else:
- if block.next_block:
- self._next_stack_depth_walk(block.next_block, depth)
- return depth
-
- def _build_lnotab(self, blocks):
- """Build the line number table for tracebacks and tracing."""
- current_line = self.first_lineno
- current_off = 0
- table = []
- push = table.append
- for block in blocks:
- offset = block.offset
- for instr in block.instructions:
- if instr.lineno:
- # compute deltas
- line = instr.lineno - current_line
- if line < 0:
- continue
- addr = offset - current_off
- # Python assumes that lineno always increases with
- # increasing bytecode address (lnotab is unsigned
- # char). Depending on when SET_LINENO instructions
- # are emitted this is not always true. Consider the
- # code:
- # a = (1,
- # b)
- # In the bytecode stream, the assignment to "a"
- # occurs after the loading of "b". This works with
- # the C Python compiler because it only generates a
- # SET_LINENO instruction for the assignment.
- if line or addr:
- while addr > 255:
- push(chr(255))
- push(chr(0))
- addr -= 255
- while line > 255:
- push(chr(addr))
- push(chr(255))
- line -= 255
- addr = 0
- push(chr(addr))
- push(chr(line))
- current_line = instr.lineno
- current_off = offset
- offset += instr.size()
- return ''.join(table)
-
- def assemble(self):
- """Build a PyCode object."""
- # Unless it's interactive, every code object must end in a return.
- if not self.current_block.have_return:
- self.use_next_block()
- if self.add_none_to_final_return:
- self.load_const(self.space.w_None)
- self.emit_op(ops.RETURN_VALUE)
- self.current_block.auto_inserted_return = True
- # Set the first lineno if it is not already explicitly set.
- if self.first_lineno == -1:
- if self.first_block.instructions:
- self.first_lineno = self.first_block.instructions[0].lineno
- else:
- self.first_lineno = 1
- blocks = self.first_block.post_order()
- self._resolve_block_targets(blocks)
- lnotab = self._build_lnotab(blocks)
- stack_depth = self._stacksize(blocks)
- consts_w = self._build_consts_array()
- names = _list_from_dict(self.names)
- var_names = _list_from_dict(self.var_names)
- cell_names = _list_from_dict(self.cell_vars)
- free_names = _list_from_dict(self.free_vars, len(cell_names))
- flags = self._get_code_flags()
- # (Only) inherit compilerflags in PyCF_MASK
- flags |= (self.compile_info.flags & consts.PyCF_MASK)
- bytecode = ''.join([block.get_code() for block in blocks])
- return PyCode(self.space,
- self.argcount,
- self.kwonlyargcount,
- len(self.var_names),
- stack_depth,
- flags,
- bytecode,
- list(consts_w),
- names,
- var_names,
- self.compile_info.filename,
- self.name,
- self.first_lineno,
- lnotab,
- free_names,
- cell_names,
- self.compile_info.hidden_applevel)
-
-
-def _list_from_dict(d, offset=0):
- result = [None] * len(d)
- for obj, index in d.iteritems():
- result[index - offset] = obj
- return result
-
-
-_static_opcode_stack_effects = {
- ops.NOP: 0,
-
- ops.POP_TOP: -1,
- ops.ROT_TWO: 0,
- ops.ROT_THREE: 0,
- ops.DUP_TOP: 1,
- ops.DUP_TOP_TWO: 2,
-
- ops.UNARY_POSITIVE: 0,
- ops.UNARY_NEGATIVE: 0,
- ops.UNARY_NOT: 0,
- ops.UNARY_INVERT: 0,
-
- ops.LIST_APPEND: -1,
- ops.SET_ADD: -1,
- ops.MAP_ADD: -2,
-<<<<<<< local
-=======
- # XXX
- ops.STORE_MAP: -2,
->>>>>>> other
-
- ops.BINARY_POWER: -1,
- ops.BINARY_MULTIPLY: -1,
- ops.BINARY_MODULO: -1,
- ops.BINARY_ADD: -1,
- ops.BINARY_SUBTRACT: -1,
- ops.BINARY_SUBSCR: -1,
- ops.BINARY_FLOOR_DIVIDE: -1,
- ops.BINARY_TRUE_DIVIDE: -1,
- ops.BINARY_MATRIX_MULTIPLY: -1,
- ops.BINARY_LSHIFT: -1,
- ops.BINARY_RSHIFT: -1,
- ops.BINARY_AND: -1,
- ops.BINARY_OR: -1,
- ops.BINARY_XOR: -1,
-
- ops.INPLACE_FLOOR_DIVIDE: -1,
- ops.INPLACE_TRUE_DIVIDE: -1,
- ops.INPLACE_ADD: -1,
- ops.INPLACE_SUBTRACT: -1,
- ops.INPLACE_MULTIPLY: -1,
- ops.INPLACE_MODULO: -1,
- ops.INPLACE_POWER: -1,
- ops.INPLACE_MATRIX_MULTIPLY: -1,
- ops.INPLACE_LSHIFT: -1,
- ops.INPLACE_RSHIFT: -1,
- ops.INPLACE_AND: -1,
- ops.INPLACE_OR: -1,
- ops.INPLACE_XOR: -1,
-
- ops.STORE_SUBSCR: -3,
- ops.DELETE_SUBSCR: -2,
-
- ops.GET_ITER: 0,
- ops.FOR_ITER: 1,
- ops.BREAK_LOOP: 0,
- ops.CONTINUE_LOOP: 0,
- ops.SETUP_LOOP: 0,
-
- ops.PRINT_EXPR: -1,
-
-<<<<<<< local
- ops.WITH_CLEANUP_START: -1,
- ops.WITH_CLEANUP_FINISH: -1, # XXX Sometimes more
-=======
- # TODO
- ops.WITH_CLEANUP: -1,
->>>>>>> other
- ops.LOAD_BUILD_CLASS: 1,
-<<<<<<< local
-=======
- # TODO
- ops.STORE_LOCALS: -1,
->>>>>>> other
- ops.POP_BLOCK: 0,
- ops.POP_EXCEPT: -1,
- ops.END_FINALLY: -4, # assume always 4: we pretend that SETUP_FINALLY
- # pushes 4. In truth, it would only push 1 and
- # the corresponding END_FINALLY only pops 1.
- ops.SETUP_WITH: 1,
- ops.SETUP_FINALLY: 0,
- ops.SETUP_EXCEPT: 0,
-
- ops.RETURN_VALUE: -1,
- ops.YIELD_VALUE: 0,
- ops.YIELD_FROM: -1,
- ops.COMPARE_OP: -1,
-
- # TODO
- ops.LOOKUP_METHOD: 1,
-
- ops.LOAD_NAME: 1,
- ops.STORE_NAME: -1,
- ops.DELETE_NAME: 0,
-
- ops.LOAD_FAST: 1,
- ops.STORE_FAST: -1,
- ops.DELETE_FAST: 0,
-
- ops.LOAD_ATTR: 0,
- ops.STORE_ATTR: -2,
- ops.DELETE_ATTR: -1,
-
- ops.LOAD_GLOBAL: 1,
- ops.STORE_GLOBAL: -1,
- ops.DELETE_GLOBAL: 0,
- ops.DELETE_DEREF: 0,
-
- ops.LOAD_CLOSURE: 1,
- ops.LOAD_DEREF: 1,
- ops.STORE_DEREF: -1,
- ops.DELETE_DEREF: 0,
From pypy.commits at gmail.com Thu Aug 11 15:25:02 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 12:25:02 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: During debugging,
print a separator line when a thread switch occurs
Message-ID: <57acd10e.d32d1c0a.a6c21.851e@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86160:e2ab29983fd5
Date: 2016-08-11 21:24 +0200
http://bitbucket.org/pypy/pypy/changeset/e2ab29983fd5/
Log: During debugging, print a separator line when a thread switch occurs
diff --git a/rpython/translator/revdb/interact.py b/rpython/translator/revdb/interact.py
--- a/rpython/translator/revdb/interact.py
+++ b/rpython/translator/revdb/interact.py
@@ -37,10 +37,19 @@
def interact(self):
last_command = 'help'
previous_time = None
+ previous_thread = 0
while True:
last_time = self.pgroup.get_current_time()
if last_time != previous_time:
print
+ if self.pgroup.get_current_thread() != previous_thread:
+ previous_thread = self.pgroup.get_current_thread()
+ if previous_thread == 0:
+ print ('-------------------- in the main thread '
+ '--------------------')
+ else:
+ print ('-------------------- in non-main thread '
+ '#%d --------------------' % (previous_thread,))
self.pgroup.update_watch_values()
last_time = self.pgroup.get_current_time()
if self.print_extra_pending_info:
@@ -49,6 +58,7 @@
if last_time != previous_time:
self.pgroup.show_backtrace(complete=0)
previous_time = last_time
+
prompt = '(%d)$ ' % last_time
try:
cmdline = raw_input(prompt).strip()
diff --git a/rpython/translator/revdb/process.py b/rpython/translator/revdb/process.py
--- a/rpython/translator/revdb/process.py
+++ b/rpython/translator/revdb/process.py
@@ -120,12 +120,13 @@
return msg
def expect_ready(self):
- msg = self.expect(ANSWER_READY, Ellipsis, Ellipsis)
+ msg = self.expect(ANSWER_READY, Ellipsis, Ellipsis, Ellipsis)
self.update_times(msg)
def update_times(self, msg):
self.current_time = msg.arg1
self.currently_created_objects = msg.arg2
+ self.current_thread = msg.arg3
def clone(self):
"""Fork this subprocess. Returns a new ReplayProcess() that is
@@ -252,10 +253,13 @@
def get_currently_created_objects(self):
return self.active.currently_created_objects
+ def get_current_thread(self):
+ return self.active.current_thread
+
def _check_current_time(self, time):
assert self.get_current_time() == time
self.active.send(Message(CMD_FORWARD, 0))
- return self.active.expect(ANSWER_READY, time, Ellipsis)
+ return self.active.expect(ANSWER_READY, time, Ellipsis, Ellipsis)
def get_max_time(self):
return self.total_stop_points
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -663,6 +663,7 @@
static stacklet_thread_handle st_thread;
static stacklet_handle st_outer_controller_h;
static uint64_t current_thread_id, target_thread_id;
+static uint64_t current_thread_num, next_thread_num;
static void *thread_tree_root;
@@ -672,7 +673,7 @@
char **argv;
};
struct replay_thread_s {
- uint64_t tid;
+ uint64_t tid, tnum;
stacklet_handle h;
struct pypy_threadlocal_s tloc;
};
@@ -754,7 +755,9 @@
struct replay_thread_s *node, **item, dummy;
if (real_tloc == NULL) {
- _OP_THREADLOCALREF_ADDR_SIGHANDLER(real_tloc);
+ char *p;
+ _OP_THREADLOCALREF_ADDR_SIGHANDLER(p);
+ real_tloc = (struct pypy_threadlocal_s *)p;
}
if (h == NULL)
@@ -767,6 +770,7 @@
if (!node)
goto out_of_memory;
node->tid = current_thread_id;
+ node->tnum = current_thread_num;
node->h = h;
/* save the thread-locals, if any */
if (real_tloc != NULL)
@@ -793,6 +797,7 @@
item = tfind(&dummy, &thread_tree_root, compare_replay_thread);
if (item == NULL) {
/* it's a new thread, start it now */
+ current_thread_num = next_thread_num++;
if (real_tloc != NULL)
memset(((char *)real_tloc) + RPY_TLOFSFIRST, 0,
sizeof(struct pypy_threadlocal_s) - RPY_TLOFSFIRST);
@@ -801,6 +806,7 @@
else {
node = *item;
assert(node->tid == target_thread_id);
+ current_thread_num = node->tnum;
h = node->h;
tdelete(node, &thread_tree_root, compare_replay_thread);
if (real_tloc != NULL)
@@ -957,6 +963,8 @@
exit(1);
}
current_thread_id = h.main_thread_id;
+ current_thread_num = 0;
+ next_thread_num = 1;
if (h.ptr1 != &rpy_reverse_db_stop_point ||
h.ptr2 != &rpy_revdb) {
fprintf(stderr,
@@ -1389,7 +1397,7 @@
write_answer(ANSWER_READY,
saved_state.stop_point_seen,
saved_state.unique_id_seen,
- 0);
+ current_thread_num);
read_sock(&cmd, sizeof(cmd));
char extra[cmd.extra_size + 1];
diff --git a/rpython/translator/revdb/test/test_thread.py b/rpython/translator/revdb/test/test_thread.py
--- a/rpython/translator/revdb/test/test_thread.py
+++ b/rpython/translator/revdb/test/test_thread.py
@@ -166,13 +166,14 @@
child = self.replay()
for i in range(2, 6):
child.send(Message(CMD_FORWARD, 1))
- child.expect(ANSWER_READY, i, Ellipsis)
+ child.expect(ANSWER_READY, i, Ellipsis,
+ (i & 1) ^ 1) # thread number: either 0 or 1 here
child.send(Message(CMD_FORWARD, 1))
child.expect(ANSWER_AT_END)
class TestThreadLocal(InteractiveTests):
- expected_stop_points = 1
+ expected_stop_points = 2
def setup_class(cls):
from rpython.translator.revdb.test.test_basic import compile, run
@@ -192,6 +193,7 @@
rthread.gc_thread_die()
def main(argv):
+ revdb.stop_point()
ec = EC(12)
raw_thread_local.set(ec)
rthread.start_new_thread(bootstrap, ())
@@ -206,4 +208,6 @@
def test_go_threadlocal(self):
child = self.replay()
child.send(Message(CMD_FORWARD, 1))
+ child.expect(ANSWER_READY, 2, Ellipsis, 1)
+ child.send(Message(CMD_FORWARD, 1))
child.expect(ANSWER_AT_END)
From pypy.commits at gmail.com Thu Aug 11 15:55:22 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Thu, 11 Aug 2016 12:55:22 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Fix validate tests,
ast classes "arguments",
"classdef" and "call" don't have stararg and kwarg anymore
Message-ID: <57acd82a.2472c20a.3b6d9.7a19@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r86161:14df96842b97
Date: 2016-08-11 21:54 +0200
http://bitbucket.org/pypy/pypy/changeset/14df96842b97/
Log: Fix validate tests, ast classes "arguments", "classdef" and "call"
don't have stararg and kwarg anymore
diff --git a/pypy/interpreter/astcompiler/test/test_validate.py b/pypy/interpreter/astcompiler/test/test_validate.py
--- a/pypy/interpreter/astcompiler/test/test_validate.py
+++ b/pypy/interpreter/astcompiler/test/test_validate.py
@@ -38,9 +38,8 @@
self.mod(m, "must have Load context", "eval")
def _check_arguments(self, fac, check):
- def arguments(args=None, vararg=None, varargannotation=None,
- kwonlyargs=None, kwarg=None, kwargannotation=None,
- defaults=None, kw_defaults=None):
+ def arguments(args=None, vararg=None, kwonlyargs=None,
+ kw_defaults=None, kwarg=None, defaults=None):
if args is None:
args = []
if kwonlyargs is None:
@@ -49,20 +48,12 @@
defaults = []
if kw_defaults is None:
kw_defaults = []
- args = ast.arguments(args, vararg, varargannotation, kwonlyargs,
- kwarg, kwargannotation, defaults, kw_defaults)
+ args = ast.arguments(args, vararg, kwonlyargs,
+ kw_defaults, kwarg, defaults)
return fac(args)
args = [ast.arg("x", ast.Name("x", ast.Store, 0, 0))]
check(arguments(args=args), "must have Load context")
- check(arguments(varargannotation=ast.Num(self.space.wrap(3), 0, 0)),
- "varargannotation but no vararg")
- check(arguments(varargannotation=ast.Name("x", ast.Store, 0, 0), vararg="x"),
- "must have Load context")
check(arguments(kwonlyargs=args), "must have Load context")
- check(arguments(kwargannotation=ast.Num(self.space.wrap(42), 0, 0)),
- "kwargannotation but no kwarg")
- check(arguments(kwargannotation=ast.Name("x", ast.Store, 0, 0),
- kwarg="x"), "must have Load context")
check(arguments(defaults=[ast.Num(self.space.wrap(3), 0, 0)]),
"more positional defaults than args")
check(arguments(kw_defaults=[ast.Num(self.space.wrap(4), 0, 0)]),
@@ -77,7 +68,7 @@
"must have Load context")
def test_funcdef(self):
- a = ast.arguments([], None, None, [], None, None, [], [])
+ a = ast.arguments([], None, [], [], None, [])
f = ast.FunctionDef("x", a, [], [], None, 0, 0)
self.stmt(f, "empty body on FunctionDef")
f = ast.FunctionDef("x", a, [ast.Pass(0, 0)], [ast.Name("x", ast.Store, 0, 0)],
@@ -91,8 +82,7 @@
self._check_arguments(fac, self.stmt)
def test_classdef(self):
- def cls(bases=None, keywords=None, starargs=None, kwargs=None,
- body=None, decorator_list=None):
+ def cls(bases=None, keywords=None, body=None, decorator_list=None):
if bases is None:
bases = []
if keywords is None:
@@ -101,16 +91,12 @@
body = [ast.Pass(0, 0)]
if decorator_list is None:
decorator_list = []
- return ast.ClassDef("myclass", bases, keywords, starargs,
- kwargs, body, decorator_list, 0, 0)
+ return ast.ClassDef("myclass", bases, keywords,
+ body, decorator_list, 0, 0)
self.stmt(cls(bases=[ast.Name("x", ast.Store, 0, 0)]),
"must have Load context")
self.stmt(cls(keywords=[ast.keyword("x", ast.Name("x", ast.Store, 0, 0))]),
"must have Load context")
- self.stmt(cls(starargs=ast.Name("x", ast.Store, 0, 0)),
- "must have Load context")
- self.stmt(cls(kwargs=ast.Name("x", ast.Store, 0, 0)),
- "must have Load context")
self.stmt(cls(body=[]), "empty body on ClassDef")
self.stmt(cls(body=[None]), "None disallowed")
self.stmt(cls(decorator_list=[ast.Name("x", ast.Store, 0, 0)]),
@@ -250,7 +236,7 @@
self.expr(u, "must have Load context")
def test_lambda(self):
- a = ast.arguments([], None, None, [], None, None, [], [])
+ a = ast.arguments([], None, [], [], None, [])
self.expr(ast.Lambda(a, ast.Name("x", ast.Store, 0, 0), 0, 0),
"must have Load context")
def fac(args):
@@ -343,20 +329,12 @@
func = ast.Name("x", ast.Load, 0, 0)
args = [ast.Name("y", ast.Load, 0, 0)]
keywords = [ast.keyword("w", ast.Name("z", ast.Load, 0, 0))]
- stararg = ast.Name("p", ast.Load, 0, 0)
- kwarg = ast.Name("q", ast.Load, 0, 0)
- call = ast.Call(ast.Name("x", ast.Store, 0, 0), args, keywords, stararg,
- kwarg, 0, 0)
+ call = ast.Call(ast.Name("x", ast.Store, 0, 0), args, keywords, 0, 0)
self.expr(call, "must have Load context")
- call = ast.Call(func, [None], keywords, stararg, kwarg, 0, 0)
+ call = ast.Call(func, [None], keywords, 0, 0)
self.expr(call, "None disallowed")
bad_keywords = [ast.keyword("w", ast.Name("z", ast.Store, 0, 0))]
- call = ast.Call(func, args, bad_keywords, stararg, kwarg, 0, 0)
- self.expr(call, "must have Load context")
- call = ast.Call(func, args, keywords, ast.Name("z", ast.Store, 0, 0), kwarg, 0, 0)
- self.expr(call, "must have Load context")
- call = ast.Call(func, args, keywords, stararg,
- ast.Name("w", ast.Store, 0, 0), 0, 0)
+ call = ast.Call(func, args, bad_keywords, 0, 0)
self.expr(call, "must have Load context")
def test_num(self):
From pypy.commits at gmail.com Thu Aug 11 16:15:48 2016
From: pypy.commits at gmail.com (arigo)
Date: Thu, 11 Aug 2016 13:15:48 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Add commands 'nthread' and
'bthread' to navigate thread switches.
Message-ID: <57acdcf4.c997c20a.41333.7c4d@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86162:9b771a2cb860
Date: 2016-08-11 22:15 +0200
http://bitbucket.org/pypy/pypy/changeset/9b771a2cb860/
Log: Add commands 'nthread' and 'bthread' to navigate thread switches.
diff --git a/pypy/interpreter/reverse_debugging.py b/pypy/interpreter/reverse_debugging.py
--- a/pypy/interpreter/reverse_debugging.py
+++ b/pypy/interpreter/reverse_debugging.py
@@ -479,6 +479,7 @@
def command_breakpoints(cmd, extra):
space = dbstate.space
dbstate.breakpoint_stack_id = cmd.c_arg1
+ revdb.set_thread_breakpoint(cmd.c_arg2)
funcnames = None
watch_progs = []
with non_standard_code:
diff --git a/rpython/rlib/revdb.py b/rpython/rlib/revdb.py
--- a/rpython/rlib/revdb.py
+++ b/rpython/rlib/revdb.py
@@ -92,6 +92,9 @@
def breakpoint(num):
llop.revdb_breakpoint(lltype.Void, num)
+def set_thread_breakpoint(tnum):
+ llop.revdb_set_thread_breakpoint(lltype.Void, tnum)
+
@specialize.argtype(0)
def get_unique_id(x):
"""Returns the creation number of the object 'x'. For objects created
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -583,6 +583,7 @@
'revdb_weakref_create': LLOp(),
'revdb_weakref_deref': LLOp(),
'revdb_call_destructor': LLOp(),
+ 'revdb_set_thread_breakpoint': LLOp(),
}
# ***** Run test_lloperation after changes. *****
diff --git a/rpython/translator/revdb/interact.py b/rpython/translator/revdb/interact.py
--- a/rpython/translator/revdb/interact.py
+++ b/rpython/translator/revdb/interact.py
@@ -45,7 +45,7 @@
if self.pgroup.get_current_thread() != previous_thread:
previous_thread = self.pgroup.get_current_thread()
if previous_thread == 0:
- print ('-------------------- in the main thread '
+ print ('-------------------- in main thread #0 '
'--------------------')
else:
print ('-------------------- in non-main thread '
@@ -147,6 +147,9 @@
elif num == -3:
kind = 'stoppoint'
name = 'explicit stop'
+ elif num == -4:
+ kind = 'switchpoint'
+ name = 'thread switch'
else:
kind = '?????point'
name = repr(break_at)
@@ -245,6 +248,17 @@
finally:
b.stack_id = 0
+ @contextmanager
+ def _thread_num_break(self, thread_num):
+ # add temporarily a breakpoint that hits when we enter/leave
+ # the given thread
+ b = self.pgroup.edit_breakpoints()
+ b.thread_num = thread_num
+ try:
+ yield
+ finally:
+ b.thread_num = -1
+
def command_next(self, argument):
"""Run forward for one step, skipping calls"""
while True:
@@ -308,7 +322,7 @@
"""Run forward until the current function finishes"""
stack_id = self.pgroup.get_stack_id(is_parent=True)
if stack_id == 0:
- print 'No stack.'
+ print 'No caller.'
else:
with self._stack_id_break(stack_id):
self.command_continue('')
@@ -317,7 +331,7 @@
"""Run backward until the current function is called"""
stack_id = self.pgroup.get_stack_id(is_parent=True)
if stack_id == 0:
- print 'No stack.'
+ print 'No caller.'
else:
with self._stack_id_break(stack_id):
self.command_bcontinue('')
@@ -333,6 +347,31 @@
self.move_backward(self.pgroup.get_current_time() - 1)
command_bc = command_bcontinue
+ def _cmd_thread(self, argument, cmd_continue):
+ argument = argument.lstrip('#')
+ if argument:
+ arg = int(argument)
+ if arg == self.pgroup.get_current_thread():
+ print 'Thread #%d is already the current one.' % (arg,)
+ return
+ else:
+ # use the current thread number to detect switches to any
+ # other thread (this works because revdb.c issues a
+ # breakpoint whenever there is a switch FROM or TO the
+ # thread '#arg').
+ arg = self.pgroup.get_current_thread()
+ #
+ with self._thread_num_break(arg):
+ cmd_continue('')
+
+ def command_nthread(self, argument):
+ """Run forward until thread switch (optionally to #ARG)"""
+ self._cmd_thread(argument, self.command_continue)
+
+ def command_bthread(self, argument):
+ """Run backward until thread switch (optionally to #ARG)"""
+ self._cmd_thread(argument, self.command_bcontinue)
+
def command_print(self, argument):
"""Print an expression or execute a line of code"""
# locate which $NUM appear used in the expression
diff --git a/rpython/translator/revdb/process.py b/rpython/translator/revdb/process.py
--- a/rpython/translator/revdb/process.py
+++ b/rpython/translator/revdb/process.py
@@ -31,15 +31,17 @@
self.watchuids = {} # {small number: [uid...]}
self.stack_id = 0 # breaks when leaving/entering a frame from/to
# the frame identified by 'stack_id'
+ self.thread_num = -1 # breaks when leaving/entering the thread_num
def __repr__(self):
- return 'AllBreakpoints(%r, %r, %r, %r)' % (
+ return 'AllBreakpoints(%r, %r, %r, %r, %r)' % (
self.num2break, self.watchvalues, self.watchuids,
- self.stack_id)
+ self.stack_id, self.thread_num)
def compare(self, other):
if (self.num2break == other.num2break and
- self.stack_id == other.stack_id):
+ self.stack_id == other.stack_id and
+ self.thread_num == other.thread_num):
if self.watchvalues == other.watchvalues:
return 2 # completely equal
else:
@@ -48,12 +50,14 @@
return 0 # different
def is_empty(self):
- return len(self.num2break) == 0 and self.stack_id == 0
+ return (len(self.num2break) == 0 and self.stack_id == 0
+ and self.thread_num == -1)
def duplicate(self):
a = AllBreakpoints()
a.num2break.update(self.num2break)
a.stack_id = self.stack_id
+ a.thread_num = self.thread_num
return a
@@ -392,8 +396,9 @@
if cmp == 0:
flat = [num2break.get(n, '\x00') for n in range(N)]
arg1 = self.all_breakpoints.stack_id
+ arg2 = self.all_breakpoints.thread_num
extra = ''.join(flat)
- self.active.send(Message(CMD_BREAKPOINTS, arg1, extra=extra))
+ self.active.send(Message(CMD_BREAKPOINTS, arg1, arg2, extra=extra))
self.active.expect_ready()
else:
assert cmp == 1
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -663,7 +663,7 @@
static stacklet_thread_handle st_thread;
static stacklet_handle st_outer_controller_h;
static uint64_t current_thread_id, target_thread_id;
-static uint64_t current_thread_num, next_thread_num;
+static uint64_t current_thread_num, next_thread_num, break_thread_num;
static void *thread_tree_root;
@@ -726,6 +726,13 @@
return 1;
}
+static void set_current_thread_num(uint64_t tnum)
+{
+ if (break_thread_num == current_thread_num || break_thread_num == tnum)
+ rpy_reverse_db_breakpoint(-4);
+ current_thread_num = tnum;
+}
+
RPY_EXTERN
int rpy_reverse_db_main(Signed entry_point(Signed, char**),
int argc, char **argv)
@@ -797,7 +804,7 @@
item = tfind(&dummy, &thread_tree_root, compare_replay_thread);
if (item == NULL) {
/* it's a new thread, start it now */
- current_thread_num = next_thread_num++;
+ set_current_thread_num(next_thread_num++);
if (real_tloc != NULL)
memset(((char *)real_tloc) + RPY_TLOFSFIRST, 0,
sizeof(struct pypy_threadlocal_s) - RPY_TLOFSFIRST);
@@ -806,7 +813,7 @@
else {
node = *item;
assert(node->tid == target_thread_id);
- current_thread_num = node->tnum;
+ set_current_thread_num(node->tnum);
h = node->h;
tdelete(node, &thread_tree_root, compare_replay_thread);
if (real_tloc != NULL)
@@ -965,6 +972,7 @@
current_thread_id = h.main_thread_id;
current_thread_num = 0;
next_thread_num = 1;
+ break_thread_num = (uint64_t)-1;
if (h.ptr1 != &rpy_reverse_db_stop_point ||
h.ptr2 != &rpy_revdb) {
fprintf(stderr,
@@ -1713,6 +1721,12 @@
exit(1);
}
+RPY_EXTERN
+void rpy_reverse_db_set_thread_breakpoint(int64_t tnum)
+{
+ break_thread_num = (uint64_t)tnum;
+}
+
/* ------------------------------------------------------------ */
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -227,6 +227,9 @@
we'll just return the UID. */
#define RPY_REVDB_CAST_PTR_TO_INT(obj) (((struct pypy_header0 *)obj)->h_uid)
+#define OP_REVDB_SET_THREAD_BREAKPOINT(tnum, r) \
+ rpy_reverse_db_set_thread_breakpoint(tnum)
+
RPY_EXTERN void rpy_reverse_db_flush(void); /* must be called with the lock */
RPY_EXTERN void rpy_reverse_db_fetch(const char *file, int line);
@@ -249,5 +252,6 @@
RPY_EXTERN void rpy_reverse_db_callback_loc(int);
RPY_EXTERN void rpy_reverse_db_lock_acquire(bool_t lock_contention);
RPY_EXTERN void rpy_reverse_db_bad_acquire_gil(void);
+RPY_EXTERN void rpy_reverse_db_set_thread_breakpoint(int64_t tnum);
/* ------------------------------------------------------------ */
From pypy.commits at gmail.com Fri Aug 12 04:36:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 01:36:42 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Some tests for prints,
including printing $0
Message-ID: <57ad8a9a.cb7f1c0a.dff6f.75ca@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86163:d7c4d2ccc68a
Date: 2016-08-12 10:20 +0200
http://bitbucket.org/pypy/pypy/changeset/d7c4d2ccc68a/
Log: Some tests for prints, including printing $0
diff --git a/pypy/interpreter/reverse_debugging.py b/pypy/interpreter/reverse_debugging.py
--- a/pypy/interpreter/reverse_debugging.py
+++ b/pypy/interpreter/reverse_debugging.py
@@ -41,7 +41,6 @@
"""
assert space.config.translation.reverse_debugger
dbstate.space = space
- dbstate.w_future = space.w_Ellipsis # a random prebuilt object
make_sure_not_resized(dbstate.watch_progs)
make_sure_not_resized(dbstate.metavars)
@@ -228,6 +227,9 @@
revdb.stop_point(place)
+def future_object(space):
+ return space.w_Ellipsis # a random prebuilt object
+
def load_metavar(index):
assert index >= 0
space = dbstate.space
@@ -236,7 +238,7 @@
if w_var is None:
raise oefmt(space.w_NameError, "no constant object '$%d'",
index)
- if w_var is dbstate.w_future:
+ if w_var is future_object(space):
raise oefmt(space.w_RuntimeError,
"'$%d' refers to an object created later in time",
index)
@@ -543,7 +545,7 @@
except KeyError:
# uid not found, probably a future object
dbstate.watch_futures[uid] = index_metavar
- w_obj = dbstate.w_future
+ w_obj = future_object(space)
set_metavar(index_metavar, w_obj)
lambda_attachid = lambda: command_attachid
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -1536,6 +1536,7 @@
save_state();
if (rpy_revdb_commands.rp_alloc) {
protect_potential_io();
+ /* invoke the "ALLOCATING" callback from RPython */
rpy_revdb_commands.rp_alloc(uid, new_object);
unprotect_potential_io();
}
diff --git a/rpython/translator/revdb/test/test_process.py b/rpython/translator/revdb/test/test_process.py
--- a/rpython/translator/revdb/test/test_process.py
+++ b/rpython/translator/revdb/test/test_process.py
@@ -1,12 +1,23 @@
-import py
+import py, sys
+from cStringIO import StringIO
from rpython.rlib import revdb
-from rpython.rlib.debug import debug_print
+from rpython.rlib.debug import debug_print, ll_assert
+from rpython.rtyper.annlowlevel import cast_gcref_to_instance
from rpython.translator.revdb.message import *
from rpython.translator.revdb.process import ReplayProcessGroup, Breakpoint
from hypothesis import given, strategies
+class stdout_capture(object):
+ def __enter__(self):
+ self.old_stdout = sys.stdout
+ sys.stdout = self.buffer = StringIO()
+ return self.buffer
+ def __exit__(self, *args):
+ sys.stdout = self.old_stdout
+
+
class TestReplayProcessGroup:
def setup_class(cls):
@@ -17,6 +28,10 @@
class DBState:
break_loop = -2
+ stuff = None
+ metavar = None
+ printed_stuff = None
+ watch_future = -1
dbstate = DBState()
def blip(cmd, extra):
@@ -27,8 +42,46 @@
revdb.send_answer(42, cmd.c_cmd, -43, -44, extra)
lambda_blip = lambda: blip
+ def command_print(cmd, extra):
+ if extra == 'print-me':
+ stuff = dbstate.stuff
+ elif extra == '$0':
+ stuff = dbstate.metavar
+ else:
+ assert False
+ uid = revdb.get_unique_id(stuff)
+ ll_assert(uid > 0, "uid == 0")
+ revdb.send_nextnid(uid) # outputs '$NUM = '
+ revdb.send_output('stuff\n')
+ dbstate.printed_stuff = stuff
+ lambda_print = lambda: command_print
+
+ def command_attachid(cmd, extra):
+ index_metavar = cmd.c_arg1
+ uid = cmd.c_arg2
+ ll_assert(index_metavar == 0, "index_metavar != 0") # in this test
+ dbstate.metavar = dbstate.printed_stuff
+ if dbstate.metavar is None:
+ # uid not found, probably a future object
+ dbstate.watch_future = uid
+ lambda_attachid = lambda: command_attachid
+
+ def command_allocating(uid, gcref):
+ stuff = cast_gcref_to_instance(Stuff, gcref)
+ # 'stuff' is just allocated; 'stuff.x' is not yet initialized
+ dbstate.printed_stuff = stuff
+ if dbstate.watch_future != -1:
+ ll_assert(dbstate.watch_future == uid,
+ "watch_future out of sync")
+ dbstate.watch_future = -1
+ dbstate.metavar = stuff
+ lambda_allocating = lambda: command_allocating
+
def main(argv):
revdb.register_debug_command(100, lambda_blip)
+ revdb.register_debug_command(CMD_PRINT, lambda_print)
+ revdb.register_debug_command(CMD_ATTACHID, lambda_attachid)
+ revdb.register_debug_command("ALLOCATING", lambda_allocating)
for i, op in enumerate(argv[1:]):
dbstate.stuff = Stuff()
dbstate.stuff.x = i + 1000
@@ -87,3 +140,26 @@
group.active.expect(42, 100, -43, -44, 'set-breakpoint')
group.active.expect(ANSWER_READY, 1, Ellipsis)
group.go_forward(10, 'i') # does not raise Breakpoint
+
+ def test_print_cmd(self):
+ group = ReplayProcessGroup(str(self.exename), self.rdbname)
+ group.go_forward(1)
+ assert group.get_current_time() == 2
+ with stdout_capture() as buf:
+ group.print_cmd('print-me')
+ assert buf.getvalue() == "$0 = stuff\n"
+ return group
+
+ def test_print_metavar(self):
+ group = self.test_print_cmd()
+ with stdout_capture() as buf:
+ group.print_cmd('$0', nids=[0])
+ assert buf.getvalue() == "$0 = stuff\n"
+
+ def test_jump_and_print_metavar(self):
+ group = self.test_print_cmd()
+ assert group.is_tainted()
+ group.jump_in_time(2)
+ with stdout_capture() as buf:
+ group.print_cmd('$0', nids=[0])
+ assert buf.getvalue() == "$0 = stuff\n"
From pypy.commits at gmail.com Fri Aug 12 04:36:44 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 01:36:44 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Failing test about setting up
a watchpoint: it doesn't force the object
Message-ID: <57ad8a9c.a710c20a.cc582.3c4b@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86164:66ba9208c0e9
Date: 2016-08-12 10:28 +0200
http://bitbucket.org/pypy/pypy/changeset/66ba9208c0e9/
Log: Failing test about setting up a watchpoint: it doesn't force the
object to be attached
diff --git a/rpython/translator/revdb/test/test_process.py b/rpython/translator/revdb/test/test_process.py
--- a/rpython/translator/revdb/test/test_process.py
+++ b/rpython/translator/revdb/test/test_process.py
@@ -77,11 +77,28 @@
dbstate.metavar = stuff
lambda_allocating = lambda: command_allocating
+ def command_compilewatch(cmd, expression):
+ revdb.send_watch("marshalled_code", ok_flag=1)
+ lambda_compilewatch = lambda: command_compilewatch
+
+ def command_checkwatch(cmd, marshalled_code):
+ assert marshalled_code == "marshalled_code"
+ # check that $0 exists
+ if dbstate.metavar is not None:
+ revdb.send_watch("ok, stuff exists\n", ok_flag=1)
+ else:
+ revdb.send_watch("stuff does not exist!\n", ok_flag=0)
+ lambda_checkwatch = lambda: command_checkwatch
+
def main(argv):
revdb.register_debug_command(100, lambda_blip)
revdb.register_debug_command(CMD_PRINT, lambda_print)
revdb.register_debug_command(CMD_ATTACHID, lambda_attachid)
revdb.register_debug_command("ALLOCATING", lambda_allocating)
+ revdb.register_debug_command(revdb.CMD_COMPILEWATCH,
+ lambda_compilewatch)
+ revdb.register_debug_command(revdb.CMD_CHECKWATCH,
+ lambda_checkwatch)
for i, op in enumerate(argv[1:]):
dbstate.stuff = Stuff()
dbstate.stuff.x = i + 1000
@@ -150,16 +167,35 @@
assert buf.getvalue() == "$0 = stuff\n"
return group
- def test_print_metavar(self):
- group = self.test_print_cmd()
+ def _print_metavar(self, group):
with stdout_capture() as buf:
group.print_cmd('$0', nids=[0])
assert buf.getvalue() == "$0 = stuff\n"
+ def test_print_metavar(self):
+ group = self.test_print_cmd()
+ self._print_metavar(group)
+
def test_jump_and_print_metavar(self):
group = self.test_print_cmd()
assert group.is_tainted()
group.jump_in_time(2)
- with stdout_capture() as buf:
- group.print_cmd('$0', nids=[0])
- assert buf.getvalue() == "$0 = stuff\n"
+ self._print_metavar(group)
+
+ def _check_watchpoint_expr(self, group, must_exist):
+ ok_flag, compiled_code = group.compile_watchpoint_expr("$0")
+ assert ok_flag == 1
+ assert compiled_code == "marshalled_code"
+ nids = [0]
+ ok_flag, text = group.check_watchpoint_expr(compiled_code, nids)
+ print text
+ assert ok_flag == must_exist
+
+ def test_check_watchpoint_expr(self):
+ group = self.test_print_cmd()
+ self._check_watchpoint_expr(group, must_exist=1)
+
+ def test_jump_and_check_watchpoint_expr(self):
+ group = self.test_print_cmd()
+ group.jump_in_time(2)
+ self._check_watchpoint_expr(group, must_exist=1)
From pypy.commits at gmail.com Fri Aug 12 04:36:45 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 01:36:45 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Fix
Message-ID: <57ad8a9d.c15e1c0a.435bd.790c@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86165:ff0f492637a5
Date: 2016-08-12 10:36 +0200
http://bitbucket.org/pypy/pypy/changeset/ff0f492637a5/
Log: Fix
diff --git a/rpython/translator/revdb/process.py b/rpython/translator/revdb/process.py
--- a/rpython/translator/revdb/process.py
+++ b/rpython/translator/revdb/process.py
@@ -447,6 +447,7 @@
def check_watchpoint_expr(self, compiled_code, nids=None):
if nids:
+ self.ensure_nids_to_uids(nids)
uids = self.nids_to_uids(nids)
self.attach_printed_objects(uids, watch_env=True)
self.active.send(Message(CMD_CHECKWATCH, extra=compiled_code))
@@ -542,6 +543,16 @@
uids.append(uid)
return uids
+ def ensure_nids_to_uids(self, nids):
+ # Take the objects listed in nids which are alive at the
+ # current time, and return a list of uids of them. This
+ # might require some replaying.
+ uids = []
+ if nids:
+ uids = self.nids_to_uids(nids, skip_futures=True)
+ self.ensure_printed_objects(uids)
+ return uids
+
def attach_printed_objects(self, uids, watch_env):
for uid in uids:
nid = self.all_printed_objects[uid]
@@ -559,11 +570,7 @@
def print_cmd(self, expression, nids=[]):
"""Print an expression.
"""
- uids = []
- if nids:
- uids = self.nids_to_uids(nids, skip_futures=True)
- self.ensure_printed_objects(uids)
- #
+ uids = self.ensure_nids_to_uids(nids)
self.active.tainted = True
self.attach_printed_objects(uids, watch_env=False)
self.active.send(Message(CMD_PRINT, extra=expression))
From pypy.commits at gmail.com Fri Aug 12 05:03:07 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 02:03:07 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Update printed text
Message-ID: <57ad90cb.c75dc20a.853ec.4a71@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86166:4a605deff6e0
Date: 2016-08-12 11:02 +0200
http://bitbucket.org/pypy/pypy/changeset/4a605deff6e0/
Log: Update printed text
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -978,12 +978,18 @@
fprintf(stderr,
"\n"
"In the replaying process, the addresses are different than\n"
- "in the recording process. We don't support this case for\n"
- "now, sorry. On Linux, check if Address Space Layout\n"
- "Randomization (ASLR) is enabled, and disable it with:\n"
+ "in the recording process. Make sure that the executable\n"
+ "\n"
+ " %s\n"
+ "\n"
+ "is the same one as the one that was used during recording.\n"
+ "If it is, then you may be hitting an issue with Address\n"
+ "Space Layout Randomization. On Linux, ASLR should be\n"
+ "automatically disabled, but just in case, the following\n"
+ "command disables it manually:\n"
"\n"
" echo 0 | sudo tee /proc/sys/kernel/randomize_va_space\n"
- "\n");
+ "\n", argv[0]);
exit(1);
}
*argc_p = h.argc;
@@ -1129,7 +1135,7 @@
fprintf(stderr, "%s:%d: Attempted to do I/O or access raw memory\n",
file, line);
if (flag_io_disabled != FID_POTENTIAL_IO) {
- fprintf(stderr, "but we are not in a jmpbuf_protected section\n");
+ fprintf(stderr, "but we are not in a protected section\n");
exit(1);
}
write_answer(ANSWER_ATTEMPT_IO, 0, 0, 0);
From pypy.commits at gmail.com Fri Aug 12 05:35:28 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 02:35:28 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: When entering debugger
commands, make floating-point results
Message-ID: <57ad9860.68adc20a.46caf.4dbd@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86167:c46e5f55f170
Date: 2016-08-12 11:34 +0200
http://bitbucket.org/pypy/pypy/changeset/c46e5f55f170/
Log: When entering debugger commands, make floating-point results
approximately work
diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py
--- a/rpython/rlib/rdtoa.py
+++ b/rpython/rlib/rdtoa.py
@@ -3,7 +3,7 @@
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator import cdir
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib import jit
+from rpython.rlib import jit, revdb
from rpython.rlib.rstring import StringBuilder
import py, sys
@@ -54,6 +54,8 @@
def strtod(input):
if len(input) > _INT_LIMIT:
raise MemoryError
+ if revdb.flag_io_disabled():
+ return revdb.emulate_strtod(input)
end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
try:
ll_input = rffi.str2charp(input)
@@ -236,6 +238,8 @@
special_strings=lower_special_strings, upper=False):
if precision > _INT_LIMIT:
raise MemoryError
+ if revdb.flag_io_disabled():
+ return revdb.emulate_dtoa(value)
decpt_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
try:
sign_ptr = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
diff --git a/rpython/rlib/revdb.py b/rpython/rlib/revdb.py
--- a/rpython/rlib/revdb.py
+++ b/rpython/rlib/revdb.py
@@ -7,7 +7,7 @@
from rpython.rtyper.extregistry import ExtRegistryEntry
from rpython.rtyper.annlowlevel import llhelper, hlstr
from rpython.rtyper.annlowlevel import cast_gcref_to_instance
-from rpython.rtyper.lltypesystem import rffi
+from rpython.rtyper.lltypesystem import lltype, rffi
CMD_PRINT = 1
@@ -81,6 +81,14 @@
"""
return llop.revdb_get_value(lltype.Signed, 'p')
+def flag_io_disabled():
+ """Returns True if we're in the debugger typing commands."""
+ if we_are_translated():
+ if fetch_translated_config().translation.reverse_debugger:
+ flag = llop.revdb_get_value(lltype.Signed, 'i')
+ return flag != ord('R') # FID_REGULAR_MODE
+ return False
+
## @specialize.arg(1)
## def go_forward(time_delta, callback):
## """For RPython debug commands: tells that after this function finishes,
@@ -203,3 +211,22 @@
def specialize_call(self, hop):
hop.exception_cannot_occur()
+
+
+# ____________________________________________________________
+
+# Emulation for strtod() and dtoa() when running debugger commands
+# (we can't easily just call C code there). The emulation can return
+# a crude result. Hack hack hack.
+
+_INVALID_STRTOD = -3.46739514239368e+113
+
+def emulate_strtod(input):
+ d = llop.revdb_strtod(lltype.Float, input)
+ if d == _INVALID_STRTOD:
+ raise ValueError
+ return d
+
+def emulate_dtoa(value):
+ s = llop.revdb_dtoa(lltype.Ptr(rstr.STR), value)
+ return hlstr(s)
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -584,6 +584,8 @@
'revdb_weakref_deref': LLOp(),
'revdb_call_destructor': LLOp(),
'revdb_set_thread_breakpoint': LLOp(),
+ 'revdb_strtod': LLOp(sideeffects=False),
+ 'revdb_dtoa': LLOp(sideeffects=False),
}
# ***** Run test_lloperation after changes. *****
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -1523,6 +1523,8 @@
saved_state.unique_id_seen);
case 'p': /* current_place() */
return current_place;
+ case 'i': /* flag_io_disabled() */
+ return flag_io_disabled;
default:
return -1;
}
@@ -1734,6 +1736,39 @@
break_thread_num = (uint64_t)tnum;
}
+#define INVALID_STRTOD (-3.46739514239368e+113)
+
+RPY_EXTERN
+double rpy_reverse_db_strtod(RPyString *s)
+{
+ /* approximate hacks only */
+ double result;
+ char *endptr = NULL;
+ char buffer[8192];
+ size_t size = RPyString_Size(s);
+
+ if (size >= sizeof(buffer))
+ return INVALID_STRTOD;
+ memcpy(buffer, _RPyString_AsString(s), size);
+ buffer[size] = '\0';
+ result = strtod(buffer, &endptr);
+ if (endptr == NULL || *endptr != '\0')
+ return INVALID_STRTOD;
+ return result;
+}
+
+RPY_EXTERN RPyString *rpy_reverse_db_dtoa(double d)
+{
+ char buffer[128];
+ RPyString *result;
+ int size;
+ size = snprintf(buffer, sizeof(buffer), "%g", d);
+ if (size < 0) size = 0; /* XXX? */
+ result = make_rpy_string(size);
+ memcpy(_RPyString_AsString(result), buffer, size);
+ return result;
+}
+
/* ------------------------------------------------------------ */
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -230,6 +230,12 @@
#define OP_REVDB_SET_THREAD_BREAKPOINT(tnum, r) \
rpy_reverse_db_set_thread_breakpoint(tnum)
+#define OP_REVDB_STRTOD(s, r) \
+ r = rpy_reverse_db_strtod(s)
+
+#define OP_REVDB_DTOA(d, r) \
+ r = rpy_reverse_db_dtoa(d)
+
RPY_EXTERN void rpy_reverse_db_flush(void); /* must be called with the lock */
RPY_EXTERN void rpy_reverse_db_fetch(const char *file, int line);
@@ -253,5 +259,7 @@
RPY_EXTERN void rpy_reverse_db_lock_acquire(bool_t lock_contention);
RPY_EXTERN void rpy_reverse_db_bad_acquire_gil(void);
RPY_EXTERN void rpy_reverse_db_set_thread_breakpoint(int64_t tnum);
+RPY_EXTERN double rpy_reverse_db_strtod(RPyString *s);
+RPY_EXTERN RPyString *rpy_reverse_db_dtoa(double d);
/* ------------------------------------------------------------ */
diff --git a/rpython/translator/revdb/test/test_process.py b/rpython/translator/revdb/test/test_process.py
--- a/rpython/translator/revdb/test/test_process.py
+++ b/rpython/translator/revdb/test/test_process.py
@@ -1,6 +1,6 @@
import py, sys
from cStringIO import StringIO
-from rpython.rlib import revdb
+from rpython.rlib import revdb, rdtoa
from rpython.rlib.debug import debug_print, ll_assert
from rpython.rtyper.annlowlevel import cast_gcref_to_instance
from rpython.translator.revdb.message import *
@@ -47,6 +47,10 @@
stuff = dbstate.stuff
elif extra == '$0':
stuff = dbstate.metavar
+ elif extra == '2.35':
+ val = rdtoa.strtod('2.35')
+ revdb.send_output(rdtoa.dtoa(val))
+ return
else:
assert False
uid = revdb.get_unique_id(stuff)
@@ -199,3 +203,9 @@
group = self.test_print_cmd()
group.jump_in_time(2)
self._check_watchpoint_expr(group, must_exist=1)
+
+ def test_rdtoa(self):
+ group = ReplayProcessGroup(str(self.exename), self.rdbname)
+ with stdout_capture() as buf:
+ group.print_cmd('2.35')
+ assert buf.getvalue() == "2.35"
From pypy.commits at gmail.com Fri Aug 12 05:54:06 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 02:54:06 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: translation fix
Message-ID: <57ad9cbe.c2a5c20a.de387.55eb@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86168:bec055ffd7c0
Date: 2016-08-12 11:53 +0200
http://bitbucket.org/pypy/pypy/changeset/bec055ffd7c0/
Log: translation fix
diff --git a/rpython/rlib/revdb.py b/rpython/rlib/revdb.py
--- a/rpython/rlib/revdb.py
+++ b/rpython/rlib/revdb.py
@@ -229,4 +229,6 @@
def emulate_dtoa(value):
s = llop.revdb_dtoa(lltype.Ptr(rstr.STR), value)
- return hlstr(s)
+ s = hlstr(s)
+ assert s is not None
+ return s
From pypy.commits at gmail.com Fri Aug 12 06:27:28 2016
From: pypy.commits at gmail.com (vext01)
Date: Fri, 12 Aug 2016 03:27:28 -0700 (PDT)
Subject: [pypy-commit] pypy refactor_rmmap: Refactor rmmap.py JIT support
into its own file.
Message-ID: <57ada490.c41f1c0a.9b2b9.a4aa@mx.google.com>
Author: Edd Barrett
Branch: refactor_rmmap
Changeset: r86169:a80e988edfd0
Date: 2016-08-12 11:26 +0100
http://bitbucket.org/pypy/pypy/changeset/a80e988edfd0/
Log: Refactor rmmap.py JIT support into its own file.
And test with a W^X patch to the build system.
diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py
--- a/rpython/jit/backend/llsupport/asmmemmgr.py
+++ b/rpython/jit/backend/llsupport/asmmemmgr.py
@@ -1,7 +1,7 @@
import sys
from rpython.rlib.rarithmetic import intmask, r_uint, LONG_BIT
from rpython.rlib.objectmodel import we_are_translated
-from rpython.rlib import rmmap
+from rpython.jit.backend.llsupport import rmmap
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rlib.debug import have_debug_prints
from rpython.rtyper.lltypesystem import lltype, rffi
diff --git a/rpython/jit/backend/llsupport/rmmap.py b/rpython/jit/backend/llsupport/rmmap.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/llsupport/rmmap.py
@@ -0,0 +1,145 @@
+"""mmap for the JIT
+
+Derived from rlib.rmmap
+"""
+
+# borrow a few bits from our rlib cousin, but we must not share functions
+from rpython.rlib.rmmap import _POSIX, _MS_WINDOWS, _CYGWIN, constants, CConfig
+if _POSIX:
+ from rpython.rlib.rmmap import (
+ MAP_PRIVATE, MAP_ANONYMOUS, PROT_EXEC, PROT_READ, PROT_WRITE, PTR)
+if _MS_WINDOWS:
+ from rpython.rlib.rwin32 import LPDWORD, DWORD, BOOL
+ from rpython.rlib.rmmap import (
+ MEM_COMMIT, MEM_RESERVE, PAGE_EXECUTE_READWRITE, MEM_RELEASE)
+
+from rpython.rtyper.lltypesystem import rffi, lltype
+from rpython.rlib import rposix
+from rpython.rtyper.tool import rffi_platform
+
+
+locals().update(constants)
+
+
+def safe_external(name, args, result, save_err_on_unsafe=0, save_err_on_safe=0,
+ **kwargs):
+ return rffi.llexternal(name, args, result,
+ compilation_info=CConfig._compilation_info_,
+ sandboxsafe=True, releasegil=False,
+ save_err=save_err_on_safe, **kwargs)
+
+
+def safe_winexternal(name, args, result, **kwargs):
+ return rffi.llexternal(name, args, result,
+ compilation_info=CConfig._compilation_info_,
+ calling_conv='win', sandboxsafe=True,
+ releasegil=False, **kwargs)
+
+
+if _POSIX:
+ c_mmap_safe = safe_external(
+ 'mmap', [PTR, size_t, rffi.INT, rffi.INT, rffi.INT, off_t], PTR,
+ macro=True, save_err_on_unsafe=rffi.RFFI_SAVE_ERRNO)
+
+ c_munmap_safe = safe_external('munmap', [PTR, size_t], rffi.INT)
+
+
+if _CYGWIN:
+ c_free_safe = safe_external('free', [PTR], lltype.Void, macro=True)
+ c_malloc_safe = safe_external('malloc', [size_t], PTR, macro=True)
+
+if _MS_WINDOWS:
+ VirtualAlloc_safe = safe_winexternal(
+ 'VirtualAlloc', [rffi.VOIDP, rffi.SIZE_T, DWORD, DWORD], rffi.VOIDP)
+
+ _VirtualProtect_safe = safe_winexternal(
+ 'VirtualProtect', [rffi.VOIDP, rffi.SIZE_T, DWORD, LPDWORD], BOOL)
+
+ def VirtualProtect(addr, size, mode, oldmode_ptr):
+ return _VirtualProtect_safe(
+ addr, rffi.cast(rffi.SIZE_T, size), rffi.cast(DWORD, mode),
+ oldmode_ptr)
+ VirtualProtect._annspecialcase_ = 'specialize:ll'
+
+ VirtualFree_safe = safe_winexternal(
+ 'VirtualFree', [rffi.VOIDP, rffi.SIZE_T, DWORD], BOOL)
+
+
+if _POSIX:
+ def alloc_hinted(hintp, map_size):
+ flags = MAP_PRIVATE | MAP_ANONYMOUS
+ prot = PROT_EXEC | PROT_READ | PROT_WRITE
+ return c_mmap_safe(hintp, map_size, prot, flags, -1, 0)
+
+ # XXX is this really necessary?
+ class Hint:
+ pos = -0x4fff0000 # for reproducible results
+ hint = Hint()
+
+ def alloc(map_size):
+ """Allocate memory. This is intended to be used by the JIT,
+ so the memory has the executable bit set and gets allocated
+ internally in case of a sandboxed process.
+ """
+ from errno import ENOMEM
+ from rpython.rlib import debug
+
+ if _CYGWIN:
+ # XXX: JIT memory should be using mmap MAP_PRIVATE with
+ # PROT_EXEC but Cygwin's fork() fails. mprotect()
+ # cannot be used, but seems to be unnecessary there.
+ res = c_malloc_safe(map_size)
+ if res == rffi.cast(PTR, 0):
+ raise MemoryError
+ return res
+ res = alloc_hinted(rffi.cast(PTR, hint.pos), map_size)
+ if res == rffi.cast(PTR, -1):
+ # some systems (some versions of OS/X?) complain if they
+ # are passed a non-zero address. Try again.
+ res = alloc_hinted(rffi.cast(PTR, 0), map_size)
+ if res == rffi.cast(PTR, -1):
+ # ENOMEM simply raises MemoryError, but other errors are fatal
+ if rposix.get_saved_errno() != ENOMEM:
+ debug.fatalerror_notb(
+ "Got an unexpected error trying to allocate some "
+ "memory for the JIT (tried to do mmap() with "
+ "PROT_EXEC|PROT_READ|PROT_WRITE). This can be caused "
+ "by a system policy like PAX. You need to find how "
+ "to work around the policy on your system.")
+ raise MemoryError
+ else:
+ hint.pos += map_size
+ return res
+ alloc._annenforceargs_ = (int,)
+
+ if _CYGWIN:
+ free = c_free_safe
+ else:
+ free = c_munmap_safe
+
+elif _MS_WINDOWS:
+ class Hint:
+ pos = -0x4fff0000 # for reproducible results
+ hint = Hint()
+ # XXX this has no effect on windows
+
+ def alloc(map_size):
+ """Allocate memory. This is intended to be used by the JIT,
+ so the memory has the executable bit set.
+ XXX implement me: it should get allocated internally in
+ case of a sandboxed process
+ """
+ null = lltype.nullptr(rffi.VOIDP.TO)
+ res = VirtualAlloc_safe(null, map_size, MEM_COMMIT | MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE)
+ if not res:
+ raise MemoryError
+ arg = lltype.malloc(LPDWORD.TO, 1, zero=True, flavor='raw')
+ VirtualProtect(res, map_size, PAGE_EXECUTE_READWRITE, arg)
+ lltype.free(arg, flavor='raw')
+ # ignore errors, just try
+ return res
+ alloc._annenforceargs_ = (int,)
+
+ def free(ptr, map_size):
+ VirtualFree_safe(ptr, 0, MEM_RELEASE)
diff --git a/rpython/jit/backend/x86/detect_feature.py b/rpython/jit/backend/x86/detect_feature.py
--- a/rpython/jit/backend/x86/detect_feature.py
+++ b/rpython/jit/backend/x86/detect_feature.py
@@ -1,17 +1,20 @@
import sys
import struct
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib.rmmap import alloc, free
+from rpython.rlib.rmmap import alloc, free, set_pages_executable
+
+CPU_INFO_SZ = 4096
def cpu_info(instr):
- data = alloc(4096)
+ data = alloc(CPU_INFO_SZ, no_exec=True)
pos = 0
for c in instr:
data[pos] = c
pos += 1
+ set_pages_executable(data, CPU_INFO_SZ)
fnptr = rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), data)
code = fnptr()
- free(data, 4096)
+ free(data, CPU_INFO_SZ)
return code
def detect_sse2():
diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py
--- a/rpython/rlib/rmmap.py
+++ b/rpython/rlib/rmmap.py
@@ -155,6 +155,8 @@
c_mmap, c_mmap_safe = external('mmap', [PTR, size_t, rffi.INT, rffi.INT,
rffi.INT, off_t], PTR, macro=True,
save_err_on_unsafe=rffi.RFFI_SAVE_ERRNO)
+ c_mprotect, _ = external('mprotect',
+ [PTR, size_t, rffi.INT], rffi.INT)
# 'mmap' on linux32 is a macro that calls 'mmap64'
_, c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT)
c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT,
@@ -705,14 +707,21 @@
m.setdata(res, map_size)
return m
- def alloc_hinted(hintp, map_size):
+ def alloc_hinted(hintp, map_size, no_exec=False):
flags = MAP_PRIVATE | MAP_ANONYMOUS
- prot = PROT_EXEC | PROT_READ | PROT_WRITE
+ prot = PROT_READ | PROT_WRITE
+ if not no_exec:
+ prot |= PROT_EXEC
if we_are_translated():
flags = NonConstant(flags)
prot = NonConstant(prot)
return c_mmap_safe(hintp, map_size, prot, flags, -1, 0)
+ def set_pages_executable(addr, size):
+ rv = c_mprotect(addr, size, PROT_EXEC)
+ if rv < 0:
+ debug.fatalerror_notb("set_pages_executable failed")
+
def clear_large_memory_chunk_aligned(addr, map_size):
addr = rffi.cast(PTR, addr)
flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS
@@ -728,7 +737,7 @@
pos = -0x4fff0000 # for reproducible results
hint = Hint()
- def alloc(map_size):
+ def alloc(map_size, no_exec=False):
"""Allocate memory. This is intended to be used by the JIT,
so the memory has the executable bit set and gets allocated
internally in case of a sandboxed process.
@@ -744,11 +753,11 @@
if res == rffi.cast(PTR, 0):
raise MemoryError
return res
- res = alloc_hinted(rffi.cast(PTR, hint.pos), map_size)
+ res = alloc_hinted(rffi.cast(PTR, hint.pos), map_size, no_exec=no_exec)
if res == rffi.cast(PTR, -1):
# some systems (some versions of OS/X?) complain if they
# are passed a non-zero address. Try again.
- res = alloc_hinted(rffi.cast(PTR, 0), map_size)
+ res = alloc_hinted(rffi.cast(PTR, 0), map_size, no_exec=no_exec)
if res == rffi.cast(PTR, -1):
# ENOMEM simply raises MemoryError, but other errors are fatal
if rposix.get_saved_errno() != ENOMEM:
@@ -762,7 +771,7 @@
else:
hint.pos += map_size
return res
- alloc._annenforceargs_ = (int,)
+ alloc._annenforceargs_ = (int, bool)
if _CYGWIN:
free = c_free_safe
@@ -933,11 +942,13 @@
hint = Hint()
# XXX this has no effect on windows
- def alloc(map_size):
+ def alloc(map_size, no_exec=no_exec):
"""Allocate memory. This is intended to be used by the JIT,
so the memory has the executable bit set.
XXX implement me: it should get allocated internally in
case of a sandboxed process
+
+ XXX no_exec not implemented on windows
"""
null = lltype.nullptr(rffi.VOIDP.TO)
res = VirtualAlloc_safe(null, map_size, MEM_COMMIT | MEM_RESERVE,
@@ -949,7 +960,10 @@
lltype.free(arg, flavor='raw')
# ignore errors, just try
return res
- alloc._annenforceargs_ = (int,)
+ alloc._annenforceargs_ = (int, bool)
+
+ def set_pages_executable(addr, size):
+ pass # XXX not implemented on windows
def free(ptr, map_size):
VirtualFree_safe(ptr, 0, MEM_RELEASE)
From pypy.commits at gmail.com Fri Aug 12 09:01:30 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 06:01:30 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Emulate modf. Fix emulation of
dtoa(2.0) to output the ".0" too.
Message-ID: <57adc8aa.10a81c0a.b1ccd.e1a1@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86170:44f0653642eb
Date: 2016-08-12 15:00 +0200
http://bitbucket.org/pypy/pypy/changeset/44f0653642eb/
Log: Emulate modf. Fix emulation of dtoa(2.0) to output the ".0" too.
diff --git a/rpython/rlib/revdb.py b/rpython/rlib/revdb.py
--- a/rpython/rlib/revdb.py
+++ b/rpython/rlib/revdb.py
@@ -232,3 +232,7 @@
s = hlstr(s)
assert s is not None
return s
+
+def emulate_modf(x):
+ return (llop.revdb_modf(lltype.Float, x, 0),
+ llop.revdb_modf(lltype.Float, x, 1))
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -586,6 +586,7 @@
'revdb_set_thread_breakpoint': LLOp(),
'revdb_strtod': LLOp(sideeffects=False),
'revdb_dtoa': LLOp(sideeffects=False),
+ 'revdb_modf': LLOp(sideeffects=False),
}
# ***** Run test_lloperation after changes. *****
diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py
--- a/rpython/rtyper/lltypesystem/module/ll_math.py
+++ b/rpython/rtyper/lltypesystem/module/ll_math.py
@@ -4,7 +4,7 @@
import sys
from rpython.translator import cdir
-from rpython.rlib import jit, rposix
+from rpython.rlib import jit, rposix, revdb
from rpython.rlib.rfloat import INFINITY, NAN, isfinite, isinf, isnan
from rpython.rlib.rposix import UNDERSCORE_ON_WIN32
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -221,6 +221,8 @@
def ll_math_modf(x):
# some platforms don't do the right thing for NaNs and
# infinities, so we take care of special cases directly.
+ if revdb.flag_io_disabled():
+ return revdb.emulate_modf(x)
if not isfinite(x):
if isnan(x):
return (x, x)
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -1759,11 +1759,18 @@
RPY_EXTERN RPyString *rpy_reverse_db_dtoa(double d)
{
- char buffer[128];
+ char buffer[128], *p;
RPyString *result;
int size;
- size = snprintf(buffer, sizeof(buffer), "%g", d);
- if (size < 0) size = 0; /* XXX? */
+ size = snprintf(buffer, sizeof(buffer) - 3, "%g", d);
+ if (size < 0)
+ size = 0;
+ for (p = buffer; '0' <= *p && *p <= '9'; p++) {
+ }
+ if (*p == 0) { /* a pure integer */
+ buffer[size++] = '.';
+ buffer[size++] = '0';
+ }
result = make_rpy_string(size);
memcpy(_RPyString_AsString(result), buffer, size);
return result;
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -236,6 +236,13 @@
#define OP_REVDB_DTOA(d, r) \
r = rpy_reverse_db_dtoa(d)
+#define OP_REVDB_MODF(x, index, r) \
+ do { \
+ double _r0, _r1; \
+ _r0 = modf(x, &_r1); \
+ r = (index == 0) ? _r0 : _r1; \
+ } while (0)
+
RPY_EXTERN void rpy_reverse_db_flush(void); /* must be called with the lock */
RPY_EXTERN void rpy_reverse_db_fetch(const char *file, int line);
diff --git a/rpython/translator/revdb/test/test_process.py b/rpython/translator/revdb/test/test_process.py
--- a/rpython/translator/revdb/test/test_process.py
+++ b/rpython/translator/revdb/test/test_process.py
@@ -1,4 +1,4 @@
-import py, sys
+import py, sys, math
from cStringIO import StringIO
from rpython.rlib import revdb, rdtoa
from rpython.rlib.debug import debug_print, ll_assert
@@ -49,7 +49,9 @@
stuff = dbstate.metavar
elif extra == '2.35':
val = rdtoa.strtod('2.35')
- revdb.send_output(rdtoa.dtoa(val))
+ valx, valy = math.modf(val)
+ revdb.send_output(rdtoa.dtoa(valx) + '\n')
+ revdb.send_output(rdtoa.dtoa(valy) + '\n')
return
else:
assert False
@@ -208,4 +210,4 @@
group = ReplayProcessGroup(str(self.exename), self.rdbname)
with stdout_capture() as buf:
group.print_cmd('2.35')
- assert buf.getvalue() == "2.35"
+ assert buf.getvalue() == "0.35\n2.0\n"
From pypy.commits at gmail.com Fri Aug 12 10:13:10 2016
From: pypy.commits at gmail.com (vext01)
Date: Fri, 12 Aug 2016 07:13:10 -0700 (PDT)
Subject: [pypy-commit] pypy refactor_rmmap: Close branch.
Message-ID: <57add976.6aaac20a.437bd.c571@mx.google.com>
Author: Edd Barrett
Branch: refactor_rmmap
Changeset: r86171:9f7d18f5d82f
Date: 2016-08-12 15:12 +0100
http://bitbucket.org/pypy/pypy/changeset/9f7d18f5d82f/
Log: Close branch.
From pypy.commits at gmail.com Fri Aug 12 11:49:30 2016
From: pypy.commits at gmail.com (mjacob)
Date: Fri, 12 Aug 2016 08:49:30 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: hg merge py3k
Message-ID: <57adf00a.a717c20a.4589.e4c0@mx.google.com>
Author: Manuel Jacob
Branch: py3.5
Changeset: r86172:d1ba25403058
Date: 2016-08-12 17:48 +0200
http://bitbucket.org/pypy/pypy/changeset/d1ba25403058/
Log: hg merge py3k
diff too long, truncating to 2000 out of 4110 lines
diff --git a/include/PyPy.h b/include/PyPy.h
--- a/include/PyPy.h
+++ b/include/PyPy.h
@@ -2,7 +2,11 @@
#define _PYPY_H_
/* This header is meant to be included in programs that use PyPy as an
- embedded library. */
+ embedded library.
+
+ NOTE: this is deprecated. Instead, use cffi's embedding support:
+ http://cffi.readthedocs.org/en/latest/embedding.html
+*/
#ifdef __cplusplus
extern "C" {
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.7.0
+Version: 1.8.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.7.0"
-__version_info__ = (1, 7, 0)
+__version__ = "1.8.0"
+__version_info__ = (1, 8, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -42,7 +42,9 @@
# include
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
- typedef unsigned char _Bool;
+# ifndef __cplusplus
+ typedef unsigned char _Bool;
+# endif
# endif
#else
# include
@@ -59,7 +61,7 @@
#ifdef __cplusplus
# ifndef _Bool
-# define _Bool bool /* semi-hackish: C++ has no _Bool; bool is builtin */
+ typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
@@ -196,20 +198,6 @@
return NULL;
}
-_CFFI_UNUSED_FN
-static PyObject **_cffi_unpack_args(PyObject *args_tuple, Py_ssize_t expected,
- const char *fnname)
-{
- if (PyTuple_GET_SIZE(args_tuple) != expected) {
- PyErr_Format(PyExc_TypeError,
- "%.150s() takes exactly %zd arguments (%zd given)",
- fnname, expected, PyTuple_GET_SIZE(args_tuple));
- return NULL;
- }
- return &PyTuple_GET_ITEM(args_tuple, 0); /* pointer to the first item,
- the others follow */
-}
-
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.7.0"
+ "\ncompiled with cffi version: 1.8.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -519,12 +519,10 @@
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
- import warnings
- warnings.warn("%r has no values explicitly defined; next version "
- "will refuse to guess which integer type it is "
- "meant to be (unsigned/signed, int/long)"
- % self._get_c_name())
- smallest_value = largest_value = 0
+ raise api.CDefError("%r has no values explicitly defined: "
+ "refusing to guess which integer type it is "
+ "meant to be (unsigned/signed, int/long)"
+ % self._get_c_name())
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -275,6 +275,8 @@
def write_c_source_to_f(self, f, preamble):
self._f = f
prnt = self._prnt
+ if self.ffi._embedding is None:
+ prnt('#define Py_LIMITED_API')
#
# first the '#include' (actually done by inlining the file's content)
lines = self._rel_readlines('_cffi_include.h')
@@ -683,13 +685,11 @@
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
- prnt(' PyObject **aa;')
prnt()
- prnt(' aa = _cffi_unpack_args(args, %d, "%s");' % (len(rng), name))
- prnt(' if (aa == NULL)')
+ prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
+ name, len(rng), len(rng),
+ ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
- for i in rng:
- prnt(' arg%d = aa[%d];' % (i, i))
prnt()
#
for i, type in enumerate(tp.args):
@@ -862,6 +862,8 @@
enumfields = list(tp.enumfields())
for fldname, fldtype, fbitsize, fqual in enumfields:
fldtype = self._field_type(tp, fldname, fldtype)
+ self._check_not_opaque(fldtype,
+ "field '%s.%s'" % (tp.name, fldname))
# cname is None for _add_missing_struct_unions() only
op = OP_NOOP
if fbitsize >= 0:
@@ -911,6 +913,13 @@
first_field_index, c_fields))
self._seen_struct_unions.add(tp)
+ def _check_not_opaque(self, tp, location):
+ while isinstance(tp, model.ArrayType):
+ tp = tp.item
+ if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
+ raise TypeError(
+ "%s is of an opaque type (not declared in cdef())" % location)
+
def _add_missing_struct_unions(self):
# not very nice, but some struct declarations might be missing
# because they don't have any known C name. Check that they are
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -99,17 +99,24 @@
The garbage collectors used or implemented by PyPy are not based on
reference counting, so the objects are not freed instantly when they are no
-longer reachable. The most obvious effect of this is that files are not
+longer reachable. The most obvious effect of this is that files (and sockets, etc) are not
promptly closed when they go out of scope. For files that are opened for
writing, data can be left sitting in their output buffers for a while, making
the on-disk file appear empty or truncated. Moreover, you might reach your
OS's limit on the number of concurrently opened files.
-Fixing this is essentially impossible without forcing a
+If you are debugging a case where a file in your program is not closed
+properly, you can use the ``-X track-resources`` command line option. If it is
+given, a ``ResourceWarning`` is produced for every file and socket that the
+garbage collector closes. The warning will contain the stack trace of the
+position where the file or socket was created, to make it easier to see which
+parts of the program don't close files explicitly.
+
+Fixing this difference to CPython is essentially impossible without forcing a
reference-counting approach to garbage collection. The effect that you
get in CPython has clearly been described as a side-effect of the
implementation and not a language design decision: programs relying on
-this are basically bogus. It would anyway be insane to try to enforce
+this are basically bogus. It would be a too strong restriction to try to enforce
CPython's behavior in a language spec, given that it has no chance to be
adopted by Jython or IronPython (or any other port of Python to Java or
.NET).
@@ -134,7 +141,7 @@
Here are some more technical details. This issue affects the precise
time at which ``__del__`` methods are called, which
-is not reliable in PyPy (nor Jython nor IronPython). It also means that
+is not reliable or timely in PyPy (nor Jython nor IronPython). It also means that
**weak references** may stay alive for a bit longer than expected. This
makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
useful: they will appear to stay alive for a bit longer in PyPy, and
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -14,10 +14,9 @@
Defaults to 1/2 of your cache or ``4M``.
Small values (like 1 or 1KB) are useful for debugging.
-``PYPY_GC_NURSERY_CLEANUP``
- The interval at which nursery is cleaned up. Must
- be smaller than the nursery size and bigger than the
- biggest object we can allotate in the nursery.
+``PYPY_GC_NURSERY_DEBUG``
+ If set to non-zero, will fill nursery with garbage, to help
+ debugging.
``PYPY_GC_INCREMENT_STEP``
The size of memory marked during the marking step. Default is size of
@@ -62,3 +61,8 @@
use.
Values are ``0`` (off), ``1`` (on major collections) or ``2`` (also
on minor collections).
+
+``PYPY_GC_MAX_PINNED``
+ The maximal number of pinned objects at any point in time. Defaults
+ to a conservative value depending on nursery size and maximum object
+ size inside the nursery. Useful for debugging by setting it to 0.
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -2,6 +2,9 @@
pypy
======
+.. note: this is turned into a regular man page "pypy.1" by
+ doing "make man" in pypy/doc/
+
SYNOPSIS
========
@@ -48,6 +51,10 @@
-B
Disable writing bytecode (``.pyc``) files.
+-X track-resources
+ Produce a ``ResourceWarning`` whenever a file or socket is closed by the
+ garbage collector.
+
--version
Print the PyPy version.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -105,3 +105,26 @@
.. branch: ep2016sprint
Trying harder to make hash(-1) return -2, like it does on CPython
+
+.. branch: jitlog-exact-source-lines
+
+Log exact line positions in debug merge points.
+
+.. branch: null_byte_after_str
+
+Allocate all RPython strings with one extra byte, normally unused.
+It is used to hold a final zero in case we need some ``char *``
+representation of the string, together with checks like ``not
+can_move()`` or object pinning. Main new thing that this allows:
+``ffi.from_buffer(string)`` in CFFI. Additionally, and most
+importantly, CFFI calls that take directly a string as argument don't
+copy the string any more---this is like CFFI on CPython.
+
+.. branch: resource_warning
+
+Add a new command line option -X track-resources which will produce
+ResourceWarnings when the GC closes unclosed files and sockets.
+
+.. branch: cpyext-realloc
+
+Implement PyObject_Realloc
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -114,8 +114,15 @@
return getattr(space, name)(operand)
return do_fold
-def _fold_pow(space, left, right):
- return space.pow(left, right, space.w_None)
+def _fold_pow(space, w_left, w_right):
+ # don't constant-fold if "w_left" and "w_right" are integers and
+ # the estimated bit length of the power is unreasonably large
+ space.appexec([w_left, w_right], """(left, right):
+ if isinstance(left, (int, long)) and isinstance(right, (int, long)):
+ if left.bit_length() * right > 5000:
+ raise OverflowError
+ """)
+ return space.pow(w_left, w_right, space.w_None)
def _fold_not(space, operand):
return space.wrap(not space.is_true(operand))
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1322,6 +1322,25 @@
assert ops.BUILD_SET not in counts
assert ops.LOAD_CONST in counts
+ def test_dont_fold_huge_powers(self):
+ for source in (
+ "2 ** 3000", # not constant-folded: too big
+ "(-2) ** 3000",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER in counts
+
+ for source in (
+ "2 ** 2000", # constant-folded
+ "2 ** -3000",
+ "1.001 ** 3000",
+ "1 ** 3000.0",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER not in counts
+
def test_call_function_var(self):
source = """call(*me)"""
code, blocks = generate_function_code(source, self.space)
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1801,6 +1801,40 @@
_warnings.warn(msg, warningcls, stacklevel=stacklevel)
""")
+ def resource_warning(self, w_msg, w_tb):
+ self.appexec([w_msg, w_tb],
+ """(msg, tb):
+ import sys
+ print >> sys.stderr, msg
+ if tb:
+ print >> sys.stderr, "Created at (most recent call last):"
+ print >> sys.stderr, tb
+ """)
+
+ def format_traceback(self):
+ # we need to disable track_resources before calling the traceback
+ # module. Else, it tries to open more files to format the traceback,
+ # the file constructor will call space.format_traceback etc., in an
+ # inifite recursion
+ flag = self.sys.track_resources
+ self.sys.track_resources = False
+ try:
+ return self.appexec([],
+ """():
+ import sys, traceback
+ # the "1" is because we don't want to show THIS code
+ # object in the traceback
+ try:
+ f = sys._getframe(1)
+ except ValueError:
+ # this happens if you call format_traceback at the very beginning
+ # of startup, when there is no bottom code object
+ return ''
+ return "".join(traceback.format_stack(f))
+ """)
+ finally:
+ self.sys.track_resources = flag
+
class AppExecCache(SpaceCache):
def build(cache, source):
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -209,6 +209,13 @@
self.check(['-c', 'pass'], {'PYTHONNOUSERSITE': '1'}, sys_argv=['-c'],
run_command='pass', **expected)
+ def test_track_resources(self, monkeypatch):
+ myflag = [False]
+ def pypy_set_track_resources(flag):
+ myflag[0] = flag
+ monkeypatch.setattr(sys, 'pypy_set_track_resources', pypy_set_track_resources, raising=False)
+ self.check(['-X', 'track-resources'], {}, sys_argv=[''], run_stdin=True)
+ assert myflag[0] == True
class TestInteraction:
"""
@@ -1152,4 +1159,3 @@
# assert it did not crash
finally:
sys.path[:] = old_sys_path
-
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -458,3 +458,28 @@
space.finish()
# assert that we reach this point without getting interrupted
# by the OperationError(NameError)
+
+ def test_format_traceback(self):
+ from pypy.tool.pytest.objspace import maketestobjspace
+ from pypy.interpreter.gateway import interp2app
+ #
+ def format_traceback(space):
+ return space.format_traceback()
+ #
+ space = maketestobjspace()
+ w_format_traceback = space.wrap(interp2app(format_traceback))
+ w_tb = space.appexec([w_format_traceback], """(format_traceback):
+ def foo():
+ return bar()
+ def bar():
+ return format_traceback()
+ return foo()
+ """)
+ tb = space.str_w(w_tb)
+ expected = '\n'.join([
+ ' File "?", line 6, in anonymous', # this is the appexec code object
+ ' File "?", line 3, in foo',
+ ' File "?", line 5, in bar',
+ ''
+ ])
+ assert tb == expected
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi, entrypoint
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.7.0"
+VERSION = "1.8.0"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -157,11 +157,13 @@
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
try:
+ keepalives = [None] * len(args_w) # None or strings
for i in range(len(args_w)):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
w_obj = args_w[i]
argtype = self.fargs[i]
- if argtype.convert_argument_from_object(data, w_obj):
+ if argtype.convert_argument_from_object(data, w_obj,
+ keepalives, i):
# argtype is a pointer type, and w_obj a list/tuple/str
mustfree_max_plus_1 = i + 1
@@ -177,9 +179,13 @@
if isinstance(argtype, W_CTypePointer):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
flag = get_mustfree_flag(data)
+ raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
if flag == 1:
- raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
lltype.free(raw_cdata, flavor='raw')
+ elif flag >= 4:
+ value = keepalives[i]
+ assert value is not None
+ rffi.free_nonmovingbuffer(value, raw_cdata, chr(flag))
lltype.free(buffer, flavor='raw')
keepalive_until_here(args_w)
return w_res
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -83,7 +83,7 @@
raise oefmt(space.w_TypeError, "cannot initialize cdata '%s'",
self.name)
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
self.convert_from_object(cdata, w_ob)
return False
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -16,8 +16,8 @@
class W_CTypePtrOrArray(W_CType):
- _attrs_ = ['ctitem', 'can_cast_anything', 'length']
- _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length']
+ _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
+ _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
length = -1
def __init__(self, space, size, extra, extra_position, ctitem,
@@ -30,6 +30,9 @@
# - for functions, it is the return type
self.ctitem = ctitem
self.can_cast_anything = could_cast_anything and ctitem.cast_anything
+ self.accept_str = (self.can_cast_anything or
+ (ctitem.is_primitive_integer and
+ ctitem.size == rffi.sizeof(lltype.Char)))
def is_unichar_ptr_or_array(self):
return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar)
@@ -72,9 +75,7 @@
pass
else:
self._convert_array_from_listview(cdata, space.listview(w_ob))
- elif (self.can_cast_anything or
- (self.ctitem.is_primitive_integer and
- self.ctitem.size == rffi.sizeof(lltype.Char))):
+ elif self.accept_str:
if not space.isinstance_w(w_ob, space.w_str):
raise self._convert_error("bytes or list or tuple", w_ob)
s = space.str_w(w_ob)
@@ -262,8 +263,16 @@
else:
return lltype.nullptr(rffi.CCHARP.TO)
- def _prepare_pointer_call_argument(self, w_init, cdata):
+ def _prepare_pointer_call_argument(self, w_init, cdata, keepalives, i):
space = self.space
+ if self.accept_str and space.isinstance_w(w_init, space.w_str):
+ # special case to optimize strings passed to a "char *" argument
+ value = w_init.str_w(space)
+ keepalives[i] = value
+ buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
+ rffi.cast(rffi.CCHARPP, cdata)[0] = buf
+ return ord(buf_flag) # 4, 5 or 6
+ #
if (space.isinstance_w(w_init, space.w_list) or
space.isinstance_w(w_init, space.w_tuple)):
length = space.int_w(space.len(w_init))
@@ -300,10 +309,11 @@
rffi.cast(rffi.CCHARPP, cdata)[0] = result
return 1
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag
result = (not isinstance(w_ob, cdataobj.W_CData) and
- self._prepare_pointer_call_argument(w_ob, cdata))
+ self._prepare_pointer_call_argument(w_ob, cdata,
+ keepalives, i))
if result == 0:
self.convert_from_object(cdata, w_ob)
set_mustfree_flag(cdata, result)
diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py
--- a/pypy/module/_cffi_backend/ffi_obj.py
+++ b/pypy/module/_cffi_backend/ffi_obj.py
@@ -353,7 +353,7 @@
'array.array' or numpy arrays."""
#
w_ctchara = newtype._new_chara_type(self.space)
- return func.from_buffer(self.space, w_ctchara, w_python_buffer)
+ return func._from_buffer(self.space, w_ctchara, w_python_buffer)
@unwrap_spec(w_arg=W_CData)
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -1,7 +1,8 @@
from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw
-from rpython.rlib.objectmodel import keepalive_until_here
+from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated
+from rpython.rlib import jit
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
@@ -132,17 +133,66 @@
raise oefmt(space.w_TypeError,
"needs 'char[]', got '%s'", w_ctype.name)
#
+ return _from_buffer(space, w_ctype, w_x)
+
+def _from_buffer(space, w_ctype, w_x):
buf = _fetch_as_read_buffer(space, w_x)
- try:
- _cdata = buf.get_raw_address()
- except ValueError:
- raise oefmt(space.w_TypeError,
- "from_buffer() got a '%T' object, which supports the "
- "buffer interface but cannot be rendered as a plain "
- "raw address on PyPy", w_x)
+ if space.isinstance_w(w_x, space.w_str):
+ _cdata = get_raw_address_of_string(space, w_x)
+ else:
+ try:
+ _cdata = buf.get_raw_address()
+ except ValueError:
+ raise oefmt(space.w_TypeError,
+ "from_buffer() got a '%T' object, which supports the "
+ "buffer interface but cannot be rendered as a plain "
+ "raw address on PyPy", w_x)
#
return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x)
+# ____________________________________________________________
+
+class RawBytes(object):
+ def __init__(self, string):
+ self.ptr = rffi.str2charp(string, track_allocation=False)
+ def __del__(self):
+ rffi.free_charp(self.ptr, track_allocation=False)
+
+class RawBytesCache(object):
+ def __init__(self, space):
+ from pypy.interpreter.baseobjspace import W_Root
+ from rpython.rlib import rweakref
+ self.wdict = rweakref.RWeakKeyDictionary(W_Root, RawBytes)
+
+ at jit.dont_look_inside
+def get_raw_address_of_string(space, w_x):
+ """Special case for ffi.from_buffer(string). Returns a 'char *' that
+ is valid as long as the string object is alive. Two calls to
+ ffi.from_buffer(same_string) are guaranteed to return the same pointer.
+ """
+ from rpython.rtyper.annlowlevel import llstr
+ from rpython.rtyper.lltypesystem.rstr import STR
+ from rpython.rtyper.lltypesystem import llmemory
+ from rpython.rlib import rgc
+
+ cache = space.fromcache(RawBytesCache)
+ rawbytes = cache.wdict.get(w_x)
+ if rawbytes is None:
+ data = space.str_w(w_x)
+ if we_are_translated() and not rgc.can_move(data):
+ lldata = llstr(data)
+ data_start = (llmemory.cast_ptr_to_adr(lldata) +
+ rffi.offsetof(STR, 'chars') +
+ llmemory.itemoffsetof(STR.chars, 0))
+ data_start = rffi.cast(rffi.CCHARP, data_start)
+ data_start[len(data)] = '\x00' # write the final extra null
+ return data_start
+ rawbytes = RawBytes(data)
+ cache.wdict.set(w_x, rawbytes)
+ return rawbytes.ptr
+
+# ____________________________________________________________
+
def unsafe_escaping_ptr_for_ptr_or_array(w_cdata):
if not w_cdata.ctype.is_nonfunc_pointer_or_array:
diff --git a/pypy/module/_cffi_backend/parse_c_type.py b/pypy/module/_cffi_backend/parse_c_type.py
--- a/pypy/module/_cffi_backend/parse_c_type.py
+++ b/pypy/module/_cffi_backend/parse_c_type.py
@@ -97,11 +97,8 @@
[rffi.INT], rffi.CCHARP)
def parse_c_type(info, input):
- p_input = rffi.str2charp(input)
- try:
+ with rffi.scoped_view_charp(input) as p_input:
res = ll_parse_c_type(info, p_input)
- finally:
- rffi.free_charp(p_input)
return rffi.cast(lltype.Signed, res)
NULL_CTX = lltype.nullptr(PCTX.TO)
@@ -130,15 +127,13 @@
return rffi.getintfield(src_ctx, 'c_num_types')
def search_in_globals(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_globals(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_globals(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
def search_in_struct_unions(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_struct_unions(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_struct_unions(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.7.0", ("This test_c.py file is for testing a version"
+assert __version__ == "1.8.0", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
@@ -3330,13 +3330,18 @@
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
- py.test.raises(TypeError, from_buffer, BCharA, b"foo")
+ p1 = from_buffer(BCharA, b"foo")
+ assert p1 == from_buffer(BCharA, b"foo")
+ import gc; gc.collect()
+ assert p1 == from_buffer(BCharA, b"foo")
py.test.raises(TypeError, from_buffer, BCharA, u+"foo")
try:
from __builtin__ import buffer
except ImportError:
pass
else:
+ # from_buffer(buffer(b"foo")) does not work, because it's not
+ # implemented on pypy; only from_buffer(b"foo") works.
py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo"))
py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo"))
try:
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -402,21 +402,20 @@
_WriteFile, ERROR_NO_SYSTEM_RESOURCES)
from rpython.rlib import rwin32
- charp = rffi.str2charp(buf)
- written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
- flavor='raw')
- try:
- result = _WriteFile(
- self.handle, rffi.ptradd(charp, offset),
- size, written_ptr, rffi.NULL)
+ with rffi.scoped_view_charp(buf) as charp:
+ written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
+ flavor='raw')
+ try:
+ result = _WriteFile(
+ self.handle, rffi.ptradd(charp, offset),
+ size, written_ptr, rffi.NULL)
- if (result == 0 and
- rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
- raise oefmt(space.w_ValueError,
- "Cannot send %d bytes over connection", size)
- finally:
- rffi.free_charp(charp)
- lltype.free(written_ptr, flavor='raw')
+ if (result == 0 and
+ rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
+ raise oefmt(space.w_ValueError,
+ "Cannot send %d bytes over connection", size)
+ finally:
+ lltype.free(written_ptr, flavor='raw')
def do_recv_string(self, space, buflength, maxlength):
from pypy.module._multiprocessing.interp_win32 import (
diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py
--- a/pypy/module/_posixsubprocess/interp_subprocess.py
+++ b/pypy/module/_posixsubprocess/interp_subprocess.py
@@ -15,8 +15,9 @@
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
- includes=['unistd.h', 'sys/syscall.h'])
+ includes=['unistd.h', 'sys/syscall.h', 'sys/stat.h'])
HAVE_SYS_SYSCALL_H = platform.Has("syscall")
+ HAVE_SYS_STAT_H = platform.Has("stat")
HAVE_SETSID = platform.Has("setsid")
config = platform.configure(CConfig)
@@ -29,6 +30,8 @@
compile_extra = []
if config['HAVE_SYS_SYSCALL_H']:
compile_extra.append("-DHAVE_SYS_SYSCALL_H")
+if config['HAVE_SYS_STAT_H']:
+ compile_extra.append("-DHAVE_SYS_STAT_H")
if config['HAVE_SETSID']:
compile_extra.append("-DHAVE_SETSID")
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -1,6 +1,7 @@
import sys, os
-import py
+import pytest
from pypy.tool.pytest.objspace import gettestobjspace
+from pypy.interpreter.gateway import interp2app
from rpython.tool.udir import udir
from rpython.rlib import rsocket
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -13,8 +14,6 @@
mod.w_socket = space.appexec([], "(): import _socket as m; return m")
mod.path = udir.join('fd')
mod.path.write('fo')
- mod.raises = py.test.raises # make raises available from app-level tests
- mod.skip = py.test.skip
def test_gethostname():
host = space.appexec([w_socket], "(_socket): return _socket.gethostname()")
@@ -42,7 +41,7 @@
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and not ipv6:
from pypy.interpreter.error import OperationError
- with py.test.raises(OperationError):
+ with pytest.raises(OperationError):
space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyaddr(host)")
continue
@@ -58,14 +57,14 @@
assert space.unwrap(port) == 25
# 1 arg version
if sys.version_info < (2, 4):
- py.test.skip("getservbyname second argument is not optional before python 2.4")
+ pytest.skip("getservbyname second argument is not optional before python 2.4")
port = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getservbyname(name)")
assert space.unwrap(port) == 25
def test_getservbyport():
if sys.version_info < (2, 4):
- py.test.skip("getservbyport does not exist before python 2.4")
+ pytest.skip("getservbyport does not exist before python 2.4")
port = 25
# 2 args version
name = space.appexec([w_socket, space.wrap(port)],
@@ -139,7 +138,7 @@
def test_pton_ntop_ipv4():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
tests = [
("123.45.67.89", "\x7b\x2d\x43\x59"),
("0.0.0.0", "\x00" * 4),
@@ -155,9 +154,9 @@
def test_ntop_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -176,9 +175,9 @@
def test_pton_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -197,7 +196,7 @@
assert space.unwrap(w_packed) == packed
def test_has_ipv6():
- py.test.skip("has_ipv6 is always True on PyPy for now")
+ pytest.skip("has_ipv6 is always True on PyPy for now")
res = space.appexec([w_socket], "(_socket): return _socket.has_ipv6")
assert space.unwrap(res) == socket.has_ipv6
@@ -231,7 +230,7 @@
def test_addr_raw_packet():
from pypy.module._socket.interp_socket import addr_as_object
if not hasattr(rsocket._c, 'sockaddr_ll'):
- py.test.skip("posix specific test")
+ pytest.skip("posix specific test")
# HACK: To get the correct interface number of lo, which in most cases is 1,
# but can be anything (i.e. 39), we need to call the libc function
# if_nametoindex to get the correct index
@@ -653,11 +652,11 @@
class AppTestNetlink:
def setup_class(cls):
if not hasattr(os, 'getpid'):
- py.test.skip("AF_NETLINK needs os.getpid()")
+ pytest.skip("AF_NETLINK needs os.getpid()")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_NETLINK')")
if not space.is_true(w_ok):
- py.test.skip("no AF_NETLINK on this platform")
+ pytest.skip("no AF_NETLINK on this platform")
cls.space = space
def test_connect_to_kernel_netlink_routing_socket(self):
@@ -673,11 +672,11 @@
class AppTestPacket:
def setup_class(cls):
if not hasattr(os, 'getuid') or os.getuid() != 0:
- py.test.skip("AF_PACKET needs to be root for testing")
+ pytest.skip("AF_PACKET needs to be root for testing")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_PACKET')")
if not space.is_true(w_ok):
- py.test.skip("no AF_PACKET on this platform")
+ pytest.skip("no AF_PACKET on this platform")
cls.space = space
def test_convert_between_tuple_and_sockaddr_ll(self):
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -146,7 +146,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
NPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
# set both server and client callbacks, because the context
@@ -158,7 +158,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def advertiseNPN_cb(s, data_ptr, len_ptr, args):
@@ -192,7 +192,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
ALPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
with rffi.scoped_str2charp(protos) as protos_buf:
@@ -204,7 +204,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def selectALPN_cb(s, out_ptr, outlen_ptr, client, client_len, args):
@@ -239,7 +239,7 @@
Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
bound on the entropy contained in string."""
- with rffi.scoped_str2charp(string) as buf:
+ with rffi.scoped_nonmovingbuffer(string) as buf:
libssl_RAND_add(buf, len(string), entropy)
def _RAND_bytes(space, n, pseudo):
diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py
--- a/pypy/module/cppyy/capi/builtin_capi.py
+++ b/pypy/module/cppyy/capi/builtin_capi.py
@@ -537,9 +537,8 @@
releasegil=ts_helper,
compilation_info=backend.eci)
def c_charp2stdstring(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2stdstring(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2stdstring(charp)
return result
_c_stdstring2stdstring = rffi.llexternal(
"cppyy_stdstring2stdstring",
diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py
--- a/pypy/module/cppyy/capi/cint_capi.py
+++ b/pypy/module/cppyy/capi/cint_capi.py
@@ -82,9 +82,8 @@
releasegil=ts_helper,
compilation_info=eci)
def c_charp2TString(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2TString(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2TString(charp)
return result
_c_TString2TString = rffi.llexternal(
"cppyy_TString2TString",
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -65,6 +65,7 @@
else: # only other use is sring
n = len(obj._string)
assert raw_string == rffi.cast(rffi.CCHARP, 0)
+ # XXX could use rffi.get_nonmovingbuffer_final_null()
raw_string = rffi.str2charp(obj._string)
data = rffi.cast(rffi.CCHARPP, data)
data[0] = raw_string
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -11,6 +11,9 @@
from rpython.rtyper.annlowlevel import llhelper
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
from rpython.rlib.objectmodel import dont_inline
+from rpython.rlib.rfile import (FILEP, c_fread, c_fclose, c_fwrite,
+ c_fdopen, c_fileno,
+ c_fopen)# for tests
from rpython.translator import cdir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.gensupp import NameManager
@@ -84,44 +87,32 @@
assert CONST_WSTRING == rffi.CWCHARP
# FILE* interface
-FILEP = rffi.COpaquePtr('FILE')
if sys.platform == 'win32':
dash = '_'
else:
dash = ''
-fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT)
-fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
-fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING],
- FILEP, save_err=rffi.RFFI_SAVE_ERRNO)
-_fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
def fclose(fp):
- if not is_valid_fd(fileno(fp)):
+ if not is_valid_fd(c_fileno(fp)):
return -1
- return _fclose(fp)
+ return c_fclose(fp)
-_fwrite = rffi.llexternal('fwrite',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fwrite(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fwrite(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fwrite(buf, sz, n, fp)
-_fread = rffi.llexternal('fread',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fread(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fread(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fread(buf, sz, n, fp)
_feof = rffi.llexternal('feof', [FILEP], rffi.INT)
def feof(fp):
- validate_fd(fileno(fp))
+ validate_fd(c_fileno(fp))
return _feof(fp)
def is_valid_fp(fp):
- return is_valid_fd(fileno(fp))
+ return is_valid_fd(c_fileno(fp))
pypy_decl = 'pypy_decl.h'
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -96,7 +96,8 @@
raise oefmt(space.w_ValueError,
"bytes_attach called on object with ob_size %d but trying to store %d",
py_str.c_ob_size, len(s))
- rffi.c_memcpy(py_str.c_ob_sval, rffi.str2charp(s), len(s))
+ with rffi.scoped_nonmovingbuffer(s) as s_ptr:
+ rffi.c_memcpy(py_str.c_ob_sval, s_ptr, len(s))
py_str.c_ob_sval[len(s)] = '\0'
py_str.c_ob_shash = space.hash_w(w_obj)
py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
diff --git a/pypy/module/cpyext/c-api.txt b/pypy/module/cpyext/c-api.txt
deleted file mode 100644
--- a/pypy/module/cpyext/c-api.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-Reference Count
-===============
-
-XXX
-
-Borrowed References
-===================
-
-XXX
-
-PyStringObject support
-======================
-
-The problem
------------
-
-PyString_AsString() returns a (non-movable) pointer to the underlying
-buffer, whereas pypy strings are movable. C code may temporarily
-store this address and use it, as long as it owns a reference to the
-PyObject. There is no "release" function to specify that the pointer
-is not needed any more.
-
-Note that the pointer may be used to fill the initial value of
-string. This is valid only when the string was just allocated, and is
-not used elsewhere.
-
-Proposed solution
------------------
-
-Our emulation of the PyStringObject contains an additional member: a
-pointer to a char buffer; it may be NULL.
-
-- A string allocated by pypy will be converted into a PyStringObject
- with a NULL buffer. When PyString_AsString() is called, memory is
- allocated (with flavor='raw') and content is copied.
-
-- A string allocated with PyString_FromStringAndSize(NULL, size) will
- allocate a buffer with the specified size, but the reference won't
- be stored in the global map py_objects_r2w; there won't be a
- corresponding object in pypy. When from_ref() or Py_INCREF() is
- called, the pypy string is created, and added in py_objects_r2w.
- The buffer is then supposed to be immutable.
-
-- _PyString_Resize works only on not-yet-pypy'd strings, and returns a
- similar object.
-
-- PyString_Size don't need to force the object. (in this case, another
- "size" member is needed)
-
-- There could be an (expensive!) check in from_ref() that the buffer
- still corresponds to the pypy gc-managed string.
-
-PySequence_Fast support
-======================
-There are five functions for fast sequence access offered by the CPython API:
-
-PyObject* PySequence_Fast(PyObject *o, const char *m)
-
-PyObject* PySequence_Fast_GET_ITEM( PyObject *o, int i)
-
-PyObject** PySequence_Fast_ITEMS( PyObject *o)
-
-PyObject* PySequence_ITEM( PyObject *o, int i)
-
-int PySequence_Fast_GET_SIZE( PyObject *o)
-
-PyPy supports four of these, but does not support PySequence_Fast_ITEMS.
-(Various ways to support PySequence_Fast_ITEMS were considered. They all had
-two things in common: they would have taken a lot of work, and they would have
-resulted in incomplete semantics or in poor performance. We decided that a slow
-implementation of PySequence_Fast_ITEMS was not very useful.)
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -25,6 +25,8 @@
flavor='raw',
add_memory_pressure=True)
+realloc = rffi.llexternal('realloc', [rffi.VOIDP, rffi.SIZE_T], rffi.VOIDP)
+
@cpython_api([rffi.VOIDP, size_t], rffi.VOIDP)
def PyObject_Realloc(space, ptr, size):
if not lltype.cast_ptr_to_int(ptr):
@@ -32,7 +34,7 @@
flavor='raw',
add_memory_pressure=True)
# XXX FIXME
- return lltype.nullptr(rffi.VOIDP.TO)
+ return realloc(ptr, size)
@cpython_api([rffi.VOIDP], lltype.Void)
def PyObject_Free(space, ptr):
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -10,7 +10,7 @@
from pypy.objspace.std import tupleobject
from pypy.module.cpyext.tupleobject import PyTuple_Check, PyTuple_SetItem
-from pypy.module.cpyext.object import Py_IncRef, Py_DecRef
+from pypy.module.cpyext.pyobject import decref
from pypy.module.cpyext.dictobject import PyDict_Check
@@ -252,7 +252,7 @@
def setitem(self, w_list, index, w_obj):
storage = self.unerase(w_list.lstorage)
index = self._check_index(index, storage._length)
- Py_DecRef(w_list.space, storage._elems[index])
+ decref(w_list.space, storage._elems[index])
storage._elems[index] = make_ref(w_list.space, w_obj)
def length(self, w_list):
@@ -264,9 +264,8 @@
return storage._elems
def getslice(self, w_list, start, stop, step, length):
- #storage = self.unerase(w_list.lstorage)
- raise oefmt(w_list.space.w_NotImplementedError,
- "settting a slice of a PySequence_Fast is not supported")
+ w_list.switch_to_object_strategy()
+ return w_list.strategy.getslice(w_list, start, stop, step, length)
def getitems(self, w_list):
# called when switching list strategy, so convert storage
@@ -389,5 +388,5 @@
def __del__(self):
for i in range(self._length):
- Py_DecRef(self.space, self._elems[i])
+ decref(self.space, self._elems[i])
lltype.free(self._elems, flavor='raw')
diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py
--- a/pypy/module/cpyext/test/test_eval.py
+++ b/pypy/module/cpyext/test/test_eval.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.eval import (
Py_single_input, Py_file_input, Py_eval_input, PyCompilerFlags)
-from pypy.module.cpyext.api import fopen, fclose, fileno, Py_ssize_tP
+from pypy.module.cpyext.api import c_fopen, c_fclose, c_fileno, Py_ssize_tP
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.astcompiler import consts
from rpython.tool.udir import udir
@@ -130,19 +130,19 @@
def test_run_file(self, space, api):
filepath = udir / "cpyext_test_runfile.py"
filepath.write("raise ZeroDivisionError")
- fp = fopen(str(filepath), "rb")
+ fp = c_fopen(str(filepath), "rb")
filename = rffi.str2charp(str(filepath))
w_globals = w_locals = space.newdict()
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_ZeroDivisionError
api.PyErr_Clear()
# try again, but with a closed file
- fp = fopen(str(filepath), "rb")
- os.close(fileno(fp))
+ fp = c_fopen(str(filepath), "rb")
+ os.close(c_fileno(fp))
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_IOError
api.PyErr_Clear()
diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py
--- a/pypy/module/cpyext/test/test_object.py
+++ b/pypy/module/cpyext/test/test_object.py
@@ -212,8 +212,9 @@
assert type(x) is float
assert x == -12.34
- @pytest.mark.skipif(True, reason='realloc not fully implemented')
def test_object_realloc(self):
+ if not self.runappdirect:
+ skip('no untranslated support for realloc')
module = self.import_extension('foo', [
("realloctest", "METH_NOARGS",
"""
@@ -221,12 +222,11 @@
char *copy, *orig = PyObject_MALLOC(12);
memcpy(orig, "hello world", 12);
copy = PyObject_REALLOC(orig, 15);
+ /* realloc() takes care of freeing orig, if changed */
if (copy == NULL)
Py_RETURN_NONE;
ret = PyBytes_FromStringAndSize(copy, 12);
- if (copy != orig)
- PyObject_Free(copy);
- PyObject_Free(orig);
+ PyObject_Free(copy);
return ret;
""")])
x = module.realloctest()
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -78,6 +78,17 @@
assert api.PySequence_SetSlice(w_t, 1, 1, space.wrap((3,))) == 0
assert space.eq_w(w_t, space.wrap([1, 3, 5]))
+ def test_get_slice_fast(self, space, api):
+ w_t = space.wrap([1, 2, 3, 4, 5])
+ api.PySequence_Fast(w_t, "foo") # converts
+ assert space.unwrap(api.PySequence_GetSlice(w_t, 2, 4)) == [3, 4]
+ assert space.unwrap(api.PySequence_GetSlice(w_t, 1, -1)) == [2, 3, 4]
+
+ assert api.PySequence_DelSlice(w_t, 1, 4) == 0
+ assert space.eq_w(w_t, space.wrap([1, 5]))
+ assert api.PySequence_SetSlice(w_t, 1, 1, space.wrap((3,))) == 0
+ assert space.eq_w(w_t, space.wrap([1, 3, 5]))
+
def test_iter(self, space, api):
w_t = space.wrap((1, 2))
w_iter = api.PySeqIter_New(w_t)
@@ -226,18 +237,33 @@
assert space.int_w(space.len(w_l)) == 10
-class XAppTestSequenceObject(AppTestCpythonExtensionBase):
- def test_sequenceobject(self):
+class AppTestSequenceObject(AppTestCpythonExtensionBase):
+ def test_fast(self):
module = self.import_extension('foo', [
("test_fast_sequence", "METH_VARARGS",
"""
- PyObject * o = PyTuple_GetItem(args, 0);
+ int size, i;
+ PyTypeObject * common_type;
+ PyObject *foo, **objects;
+ PyObject * seq = PyTuple_GetItem(args, 0);
/* XXX assert it is a tuple */
- PyObject *foo = PySequence_Fast(o, "some string");
- PyObject ** res = PySequence_Fast_ITEMS(foo);
- /* XXX do some kind of test on res */
- /* XXX now what? who manages res's refcount? */
+ if (seq == NULL)
+ Py_RETURN_NONE;
+ foo = PySequence_Fast(seq, "some string");
+ objects = PySequence_Fast_ITEMS(foo);
+ size = PySequence_Fast_GET_SIZE(seq);
+ common_type = size > 0 ? Py_TYPE(objects[0]) : NULL;
+ for (i = 1; i < size; ++i) {
+ if (Py_TYPE(objects[i]) != common_type) {
+ common_type = NULL;
+ break;
+ }
+ }
+ Py_DECREF(foo);
+ Py_DECREF(common_type);
return PyBool_FromLong(1);
""")])
- assert module.test_fast_sequence([1, 2, 3, 4])
+ s = [1, 2, 3, 4]
+ assert module.test_fast_sequence(s[0:-1])
+ assert module.test_fast_sequence(s[::-1])
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -47,6 +47,7 @@
jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE)
def get_location(next_instr, is_being_profiled, bytecode):
from pypy.tool.stdlib_opcode import opcode_method_names
+ from rpython.tool.error import offset2lineno
bcindex = ord(bytecode.co_code[next_instr])
opname = ""
if 0 <= bcindex < len(opcode_method_names):
@@ -54,7 +55,8 @@
name = bytecode.co_name
if not name:
name = ""
- return (bytecode.co_filename, bytecode.co_firstlineno,
+ line = offset2lineno(bytecode, intmask(next_instr))
+ return (bytecode.co_filename, line,
name, intmask(next_instr), opname)
def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode):
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -19,6 +19,7 @@
self.defaultencoding = "utf-8"
self.filesystemencoding = None
self.debug = True
+ self.track_resources = False
self.dlopenflags = rdynload._dlopen_default_mode()
interpleveldefs = {
@@ -48,6 +49,8 @@
'_current_frames' : 'currentframes._current_frames',
'setrecursionlimit' : 'vm.setrecursionlimit',
'getrecursionlimit' : 'vm.getrecursionlimit',
+ 'pypy_set_track_resources' : 'vm.set_track_resources',
+ 'pypy_get_track_resources' : 'vm.get_track_resources',
'setcheckinterval' : 'vm.setcheckinterval',
'getcheckinterval' : 'vm.getcheckinterval',
'exc_info' : 'vm.exc_info',
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -61,6 +61,13 @@
"""
return space.wrap(space.sys.recursionlimit)
+ at unwrap_spec(flag=bool)
+def set_track_resources(space, flag):
+ space.sys.track_resources = flag
+
+def get_track_resources(space):
+ return space.wrap(space.sys.track_resources)
+
@unwrap_spec(interval=int)
def setcheckinterval(space, interval):
"""Tell the Python interpreter to check for asynchronous events every
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
@@ -130,7 +130,7 @@
cls.module = str(udir.join('testownlib.dll'))
else:
subprocess.check_call(
- 'gcc testownlib.c -shared -fPIC -o testownlib.so',
+ 'cc testownlib.c -shared -fPIC -o testownlib.so',
cwd=str(udir), shell=True)
cls.module = str(udir.join('testownlib.so'))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -852,9 +852,12 @@
assert str(e2.value) == "foo0() takes no arguments (2 given)"
assert str(e3.value) == "foo1() takes exactly one argument (0 given)"
assert str(e4.value) == "foo1() takes exactly one argument (2 given)"
- assert str(e5.value) == "foo2() takes exactly 2 arguments (0 given)"
- assert str(e6.value) == "foo2() takes exactly 2 arguments (1 given)"
- assert str(e7.value) == "foo2() takes exactly 2 arguments (3 given)"
+ assert str(e5.value) in ["foo2 expected 2 arguments, got 0",
+ "foo2() takes exactly 2 arguments (0 given)"]
+ assert str(e6.value) in ["foo2 expected 2 arguments, got 1",
+ "foo2() takes exactly 2 arguments (1 given)"]
+ assert str(e7.value) in ["foo2 expected 2 arguments, got 3",
+ "foo2() takes exactly 2 arguments (3 given)"]
def test_address_of_function():
ffi = FFI()
@@ -1916,3 +1919,47 @@
ffi.cdef("bool f(void);")
lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
assert lib.f() == 1
+
+def test_bool_in_cpp_2():
+ ffi = FFI()
+ ffi.cdef('int add(int a, int b);')
+ lib = verify(ffi, "test_bool_bug_cpp", '''
+ typedef bool _Bool; /* there is a Windows header with this line */
+ int add(int a, int b)
+ {
+ return a + b;
+ }''', source_extension='.cpp')
+ c = lib.add(2, 3)
+ assert c == 5
+
+def test_struct_field_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[2]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+
+def test_function_arg_opaque():
+ py.test.skip("can currently declare a function with an opaque struct "
+ "as argument, but AFAICT it's impossible to call it later")
+
+def test_function_returns_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a foo(int);")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_function_returns_opaque", "?")
+ assert str(e.value) == ("function foo: 'struct a' is used as result type,"
+ " but is opaque")
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
@@ -133,6 +133,12 @@
# You cannot assing character format codes as restype any longer
raises(TypeError, setattr, f, "restype", "i")
+ def test_unicode_function_name(self):
+ f = dll[u'_testfunc_i_bhilfd']
+ f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
+ f.restype = c_int
+ result = f(1, 2, 3, 4, 5.0, 6.0)
+ assert result == 21
def test_truncate_python_longs(self):
f = dll._testfunc_i_bhilfd
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -159,7 +159,6 @@
libraries=rtime.libraries
)
CLOCKS_PER_SEC = platform.ConstantInteger("CLOCKS_PER_SEC")
- clock_t = platform.SimpleType("clock_t", rffi.ULONG)
has_gettimeofday = platform.Has('gettimeofday')
has_clock_gettime = platform.Has('clock_gettime')
CLOCK_PROF = platform.DefinedConstantInteger('CLOCK_PROF')
@@ -233,7 +232,6 @@
HAS_CLOCK_MONOTONIC = cConfig.CLOCK_MONOTONIC is not None
HAS_MONOTONIC = (_WIN or _MACOSX or
(HAS_CLOCK_GETTIME and (HAS_CLOCK_HIGHRES or HAS_CLOCK_MONOTONIC)))
-clock_t = cConfig.clock_t
tm = cConfig.tm
glob_buf = lltype.malloc(tm, flavor='raw', zero=True, immortal=True)
@@ -1030,7 +1028,10 @@
with lltype.scoped_alloc(rposix.TMS) as tms:
ret = rposix.c_times(tms)
if rffi.cast(lltype.Signed, ret) != -1:
- cpu_time = float(tms.c_tms_utime + tms.c_tms_stime)
+ cpu_time = float(rffi.cast(lltype.Signed,
+ tms.c_tms_utime) +
+ rffi.cast(lltype.Signed,
+ tms.c_tms_stime))
if w_info is not None:
_setinfo(space, w_info, "times()",
1.0 / rposix.CLOCK_TICKS_PER_SECOND,
@@ -1038,7 +1039,7 @@
return space.wrap(cpu_time / rposix.CLOCK_TICKS_PER_SECOND)
return clock(space)
-_clock = external('clock', [], clock_t)
+_clock = external('clock', [], rposix.CLOCK_T)
def clock(space, w_info=None):
"""clock() -> floating point number
@@ -1052,7 +1053,7 @@
pass
value = _clock()
# Is this casting correct?
- if value == rffi.cast(clock_t, -1):
+ if intmask(value) == intmask(rffi.cast(rposix.CLOCK_T, -1)):
raise oefmt(space.w_RuntimeError,
"the processor time used is not available or its value"
"cannot be represented")
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -434,4 +434,5 @@
FakeObjSpace.sys.filesystemencoding = 'foobar'
FakeObjSpace.sys.defaultencoding = 'ascii'
FakeObjSpace.sys.dlopenflags = 123
+FakeObjSpace.sys.track_resources = False
FakeObjSpace.builtin = FakeModule()
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -930,6 +930,7 @@
abstractinst.p_recursive_isinstance_type_w(space, w_inst, w_obj))
def type_get_dict(space, w_cls):
+ w_cls = _check(space, w_cls)
from pypy.objspace.std.dictproxyobject import W_DictProxyObject
w_dict = w_cls.getdict(space)
if w_dict is None:
@@ -1287,8 +1288,8 @@
cycle.append(candidate)
cycle.reverse()
names = [cls.getname(space) for cls in cycle]
- raise OperationError(space.w_TypeError, space.wrap(
- u"cycle among base classes: " + u' < '.join(names)))
+ raise oefmt(space.w_TypeError,
+ "cycle among base classes: %s", ' < '.join(names))
class TypeCache(SpaceCache):
diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py
--- a/pypy/tool/pytest/objspace.py
+++ b/pypy/tool/pytest/objspace.py
@@ -143,3 +143,5 @@
def is_w(self, obj1, obj2):
return obj1 is obj2
+ def setitem(self, obj, key, value):
+ obj[key] = value
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-# hypothesis is used for test generation on untranslated jit tests
+# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py
--- a/rpython/annotator/binaryop.py
+++ b/rpython/annotator/binaryop.py
@@ -401,6 +401,9 @@
class __extend__(pairtype(SomeString, SomeTuple),
pairtype(SomeUnicodeString, SomeTuple)):
def mod((s_string, s_tuple)):
+ if not s_string.is_constant():
+ raise AnnotatorError("string formatting requires a constant "
+ "string/unicode on the left of '%'")
is_string = isinstance(s_string, SomeString)
is_unicode = isinstance(s_string, SomeUnicodeString)
assert is_string or is_unicode
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4623,6 +4623,14 @@
a = self.RPythonAnnotator()
a.build_types(main, [int])
+ def test_string_mod_nonconstant(self):
+ def f(x):
+ return x % 5
+ a = self.RPythonAnnotator()
+ e = py.test.raises(AnnotatorError, a.build_types, f, [str])
+ assert ('string formatting requires a constant string/unicode'
+ in str(e.value))
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
--- a/rpython/jit/backend/arm/opassembler.py
+++ b/rpython/jit/backend/arm/opassembler.py
@@ -883,6 +883,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self._gen_address(resloc, baseloc, ofsloc, scale, ofs_items)
diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py
--- a/rpython/jit/backend/llsupport/descr.py
+++ b/rpython/jit/backend/llsupport/descr.py
@@ -280,7 +280,7 @@
concrete_type = '\x00'
def __init__(self, basesize, itemsize, lendescr, flag, is_pure=False, concrete_type='\x00'):
- self.basesize = basesize
+ self.basesize = basesize # this includes +1 for STR
self.itemsize = itemsize
self.lendescr = lendescr # or None, if no length
self.flag = flag
@@ -676,7 +676,7 @@
def unpack_arraydescr(arraydescr):
assert isinstance(arraydescr, ArrayDescr)
- ofs = arraydescr.basesize
+ ofs = arraydescr.basesize # this includes +1 for STR
size = arraydescr.itemsize
sign = arraydescr.is_item_signed()
return size, ofs, sign
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -293,6 +293,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1),
itemsize, itemsize, basesize, NOT_SIGNED)
elif opnum == rop.UNICODEGETITEM:
@@ -304,6 +305,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2),
itemsize, itemsize, basesize)
elif opnum == rop.UNICODESETITEM:
diff --git a/rpython/jit/backend/llsupport/symbolic.py b/rpython/jit/backend/llsupport/symbolic.py
--- a/rpython/jit/backend/llsupport/symbolic.py
+++ b/rpython/jit/backend/llsupport/symbolic.py
@@ -29,7 +29,7 @@
def get_array_token(T, translate_support_code):
# T can be an array or a var-sized structure
if translate_support_code:
- basesize = llmemory.sizeof(T, 0)
+ basesize = llmemory.sizeof(T, 0) # this includes +1 for STR
if isinstance(T, lltype.Struct):
SUBARRAY = getattr(T, T._arrayfld)
itemsize = llmemory.sizeof(SUBARRAY.OF)
@@ -57,6 +57,7 @@
assert carray.length.size == WORD
ofs_length = before_array_part + carray.length.offset
basesize = before_array_part + carray.items.offset
+ basesize += T._hints.get('extra_item_after_alloc', 0) # +1 for STR
carrayitem = ll2ctypes.get_ctypes_type(T.OF)
itemsize = ctypes.sizeof(carrayitem)
return basesize, itemsize, ofs_length
diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py
--- a/rpython/jit/backend/llsupport/test/test_descr.py
+++ b/rpython/jit/backend/llsupport/test/test_descr.py
@@ -435,8 +435,10 @@
def test_bytearray_descr():
c0 = GcCache(False)
descr = get_array_descr(c0, rstr.STR) # for bytearray
+ # note that we get a basesize that has 1 extra byte for the final null char
+ # (only for STR)
assert descr.flag == FLAG_UNSIGNED
- assert descr.basesize == struct.calcsize("PP") # hash, length
+ assert descr.basesize == struct.calcsize("PP") + 1 # hash, length, extra
assert descr.lendescr.offset == struct.calcsize("P") # hash
assert not descr.is_array_of_pointers()
diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py
--- a/rpython/jit/backend/llsupport/test/test_rewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_rewrite.py
@@ -647,6 +647,9 @@
""")
def test_rewrite_assembler_newstr_newunicode(self):
+ # note: strdescr.basesize already contains the extra final character,
+ # so that's why newstr(14) is rounded up to 'basesize+15' and not
+ # 'basesize+16'.
self.check_rewrite("""
[i2]
p0 = newstr(14)
@@ -657,12 +660,12 @@
""", """
[i2]
p0 = call_malloc_nursery( \
- %(strdescr.basesize + 16 * strdescr.itemsize + \
+ %(strdescr.basesize + 15 * strdescr.itemsize + \
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s)
gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s)
gc_store(p0, 0, 0, %(strhashdescr.field_size)s)
- p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
+ p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 15 * strdescr.itemsize)d)
gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s)
gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s)
gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s)
@@ -1240,14 +1243,14 @@
# 'i3 = gc_load_i(p0,i5,%(unicodedescr.itemsize)d)'],
[True, (4,), 'i3 = strgetitem(p0,i1)' '->'
'i3 = gc_load_indexed_i(p0,i1,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
#[False, (4,), 'i3 = strgetitem(p0,i1)' '->'
- # 'i5 = int_add(i1, %(strdescr.basesize)d);'
+ # 'i5 = int_add(i1, %(strdescr.basesize-1)d);'
# 'i3 = gc_load_i(p0,i5,1)'],
## setitem str/unicode
[True, (4,), 'i3 = strsetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
[True, (2,4), 'i3 = unicodesetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,'
'%(unicodedescr.itemsize)d,'
diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py
--- a/rpython/jit/backend/llsupport/test/ztranslation_test.py
+++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py
@@ -3,7 +3,7 @@
from rpython.rlib.jit import JitDriver, unroll_parameters, set_param
from rpython.rlib.jit import PARAMETERS, dont_look_inside
from rpython.rlib.jit import promote, _get_virtualizable_token
-from rpython.rlib import jit_hooks, rposix
+from rpython.rlib import jit_hooks, rposix, rgc
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField
from rpython.jit.backend.detect_cpu import getcpuclass
@@ -11,7 +11,7 @@
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.config.config import ConfigError
from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rlib.rjitlog import rjitlog as jl
@@ -29,6 +29,7 @@
# - floats neg and abs
# - cast_int_to_float
# - llexternal with macro=True
+ # - extra place for the zero after STR instances
class BasicFrame(object):
_virtualizable_ = ['i']
@@ -56,7 +57,7 @@
return ("/home.py",0,0)
jitdriver = JitDriver(greens = [],
- reds = ['total', 'frame', 'j'],
+ reds = ['total', 'frame', 'prev_s', 'j'],
virtualizables = ['frame'],
get_location = get_location)
def f(i, j):
@@ -68,9 +69,12 @@
total = 0
frame = Frame(i)
j = float(j)
+ prev_s = rstr.mallocstr(16)
while frame.i > 3:
- jitdriver.can_enter_jit(frame=frame, total=total, j=j)
- jitdriver.jit_merge_point(frame=frame, total=total, j=j)
+ jitdriver.can_enter_jit(frame=frame, total=total, j=j,
+ prev_s=prev_s)
+ jitdriver.jit_merge_point(frame=frame, total=total, j=j,
+ prev_s=prev_s)
_get_virtualizable_token(frame)
total += frame.i
if frame.i >= 20:
@@ -82,6 +86,11 @@
k = myabs1(myabs2(j))
if k - abs(j): raise ValueError
if k - abs(-j): raise ValueError
+ s = rstr.mallocstr(16)
+ rgc.ll_write_final_null_char(s)
+ rgc.ll_write_final_null_char(prev_s)
+ if (frame.i & 3) == 0:
+ prev_s = s
return chr(total % 253)
#
class Virt2(object):
diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py
--- a/rpython/jit/backend/ppc/opassembler.py
+++ b/rpython/jit/backend/ppc/opassembler.py
@@ -994,6 +994,7 @@
basesize, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
scale = 0
self._emit_load_for_copycontent(r.r0, src_ptr_loc, src_ofs_loc, scale)
diff --git a/rpython/jit/backend/test/test_rvmprof.py b/rpython/jit/backend/test/test_rvmprof.py
--- a/rpython/jit/backend/test/test_rvmprof.py
+++ b/rpython/jit/backend/test/test_rvmprof.py
@@ -2,48 +2,157 @@
from rpython.rlib import jit
from rpython.rtyper.annlowlevel import llhelper
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib.rvmprof import cintf
+from rpython.rlib.rvmprof import cintf, vmprof_execute_code, register_code,\
+ register_code_object_class, _get_vmprof
from rpython.jit.backend.x86.arch import WORD
from rpython.jit.codewriter.policy import JitPolicy
+
class BaseRVMProfTest(object):
- def test_one(self):
- py.test.skip("needs thread-locals in the JIT, which is only available "
- "after translation")
+
+ def setup_method(self, meth):
visited = []
def helper():
+ trace = []
stack = cintf.vmprof_tl_stack.getraw()
- if stack:
- # not during tracing
- visited.append(stack.c_value)
- else:
- visited.append(0)
+ while stack:
+ trace.append((stack.c_kind, stack.c_value))
+ stack = stack.c_next
+ visited.append(trace)
llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper)
- driver = jit.JitDriver(greens=[], reds='auto')
+ class CodeObj(object):
+ def __init__(self, name):
+ self.name = name
- def f(n):
+ def get_code_fn(codes, code, arg, c):
+ return code
+
+ def get_name(code):
+ return "foo"
+
+ _get_vmprof().use_weaklist = False
+ register_code_object_class(CodeObj, get_name)
+
+ self.misc = visited, llfn, CodeObj, get_code_fn, get_name
+
+
+ def teardown_method(self, meth):
+ del _get_vmprof().use_weaklist
+
+
+ def test_simple(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
i = 0
while i < n:
- driver.jit_merge_point()
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ c = f(codes, codes[1], 1, c)
+ else:
+ llfn()
+ c -= 1
i += 1
- llfn()
+ return c
- class Hooks(jit.JitHookInterface):
- def after_compile(self, debug_info):
- self.raw_start = debug_info.asminfo.rawstart
-
- hooks = Hooks()
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ return f(codes, codes[0], n, 8)
null = lltype.nullptr(cintf.VMPROFSTACK)
- cintf.vmprof_tl_stack.setraw(null) # make it empty
- self.meta_interp(f, [10], policy=JitPolicy(hooks))
- v = set(visited)
- assert 0 in v
- v.remove(0)
- assert len(v) == 1
- assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024
- assert cintf.vmprof_tl_stack.getraw() == null
- # ^^^ make sure we didn't leave anything dangling
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
+ def test_leaving_with_exception(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ class MyExc(Exception):
+ def __init__(self, c):
+ self.c = c
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while i < n:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if code.name == "main":
+ try:
+ f(codes, codes[1], 1, c)
+ except MyExc as e:
+ c = e.c
+ else:
+ llfn()
+ c -= 1
+ i += 1
+ raise MyExc(c)
+
+ def main(n):
+ codes = [CodeObj("main"), CodeObj("not main")]
+ for code in codes:
+ register_code(code, get_name)
+ try:
+ f(codes, codes[0], n, 8)
+ except MyExc as e:
+ return e.c
+
+ null = lltype.nullptr(cintf.VMPROFSTACK)
+ cintf.vmprof_tl_stack.setraw(null)
+ self.meta_interp(main, [30], inline=True)
+ assert visited[:3] == [[(1, 12), (1, 8)], [(1, 12), (1, 8)], [(1, 12), (1, 8)]]
+
+
+ def test_leaving_with_exception_in_blackhole(self):
+ visited, llfn, CodeObj, get_code_fn, get_name = self.misc
+ driver = jit.JitDriver(greens=['code'], reds=['c', 'i', 'n', 'codes'])
+
+ class MyExc(Exception):
+ def __init__(self, c):
+ self.c = c
+
+ @vmprof_execute_code("main", get_code_fn,
+ _hack_update_stack_untranslated=True)
+ def f(codes, code, n, c):
+ i = 0
+ while True:
+ driver.jit_merge_point(code=code, c=c, i=i, codes=codes, n=n)
+ if i >= n:
From pypy.commits at gmail.com Fri Aug 12 11:58:26 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Fri, 12 Aug 2016 08:58:26 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Fix BUILD_SET_UNPACK by changing iterator
to iter instead of itervalues as w_item should never be a dict anyway
Message-ID: <57adf222.274fc20a.2cefb.eec0@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r86173:f658ed1189a5
Date: 2016-08-12 17:57 +0200
http://bitbucket.org/pypy/pypy/changeset/f658ed1189a5/
Log: Fix BUILD_SET_UNPACK by changing iterator to iter instead of
itervalues as w_item should never be a dict anyway
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1375,10 +1375,11 @@
for i in range(itemcount, 0, -1):
w_item = self.peekvalue(i-1)
# cannot use w_sum.update, w_item might not be a set
- iterator = w_item.itervalues()
+ iterator = space.iter(w_item)
while True:
- w_value = iterator.next_value()
- if w_value is None:
+ try:
+ w_value = space.next(iterator)
+ except OperationError:
break
w_sum.add(w_value)
while itemcount != 0:
From pypy.commits at gmail.com Fri Aug 12 12:35:54 2016
From: pypy.commits at gmail.com (rlamy)
Date: Fri, 12 Aug 2016 09:35:54 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Remove code added by py2-specific branch
'resource_warning'
Message-ID: <57adfaea.c186c20a.6dddb.f379@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r86174:b00718188b59
Date: 2016-08-12 17:35 +0100
http://bitbucket.org/pypy/pypy/changeset/b00718188b59/
Log: Remove code added by py2-specific branch 'resource_warning'
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1801,40 +1801,6 @@
_warnings.warn(msg, warningcls, stacklevel=stacklevel)
""")
- def resource_warning(self, w_msg, w_tb):
- self.appexec([w_msg, w_tb],
- """(msg, tb):
- import sys
- print >> sys.stderr, msg
- if tb:
- print >> sys.stderr, "Created at (most recent call last):"
- print >> sys.stderr, tb
- """)
-
- def format_traceback(self):
- # we need to disable track_resources before calling the traceback
- # module. Else, it tries to open more files to format the traceback,
- # the file constructor will call space.format_traceback etc., in an
- # inifite recursion
- flag = self.sys.track_resources
- self.sys.track_resources = False
- try:
- return self.appexec([],
- """():
- import sys, traceback
- # the "1" is because we don't want to show THIS code
- # object in the traceback
- try:
- f = sys._getframe(1)
- except ValueError:
- # this happens if you call format_traceback at the very beginning
- # of startup, when there is no bottom code object
- return ''
- return "".join(traceback.format_stack(f))
- """)
- finally:
- self.sys.track_resources = flag
-
class AppExecCache(SpaceCache):
def build(cache, source):
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -209,14 +209,6 @@
self.check(['-c', 'pass'], {'PYTHONNOUSERSITE': '1'}, sys_argv=['-c'],
run_command='pass', **expected)
- def test_track_resources(self, monkeypatch):
- myflag = [False]
- def pypy_set_track_resources(flag):
- myflag[0] = flag
- monkeypatch.setattr(sys, 'pypy_set_track_resources', pypy_set_track_resources, raising=False)
- self.check(['-X', 'track-resources'], {}, sys_argv=[''], run_stdin=True)
- assert myflag[0] == True
-
class TestInteraction:
"""
These tests require pexpect (UNIX-only).
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -134,7 +134,7 @@
assert self.space.lookup(w_instance, "gobbledygook") is None
w_instance = self.space.appexec([], """():
class Lookup(object):
- "bla"
+ "bla"
return Lookup()""")
assert self.space.str_w(self.space.lookup(w_instance, "__doc__")) == "bla"
@@ -148,7 +148,7 @@
assert is_callable(w_func)
w_lambda_func = self.space.appexec([], "(): return lambda: True")
assert is_callable(w_lambda_func)
-
+
w_instance = self.space.appexec([], """():
class Call(object):
def __call__(self): pass
@@ -308,7 +308,7 @@
def test_call_obj_args(self):
from pypy.interpreter.argument import Arguments
-
+
space = self.space
w_f = space.appexec([], """():
@@ -333,7 +333,7 @@
assert w_x is w_9
assert w_y is w_1
- w_res = space.call_obj_args(w_a, w_9, Arguments(space, []))
+ w_res = space.call_obj_args(w_a, w_9, Arguments(space, []))
assert w_res is w_9
def test_compare_by_iteration(self):
@@ -383,7 +383,7 @@
assert not space.isabstractmethod_w(space.getattr(w_B, space.wrap('g')))
assert not space.isabstractmethod_w(space.getattr(w_B, space.wrap('h')))
-class TestModuleMinimal:
+class TestModuleMinimal:
def test_sys_exists(self):
assert self.space.sys
@@ -458,28 +458,3 @@
space.finish()
# assert that we reach this point without getting interrupted
# by the OperationError(NameError)
-
- def test_format_traceback(self):
- from pypy.tool.pytest.objspace import maketestobjspace
- from pypy.interpreter.gateway import interp2app
- #
- def format_traceback(space):
- return space.format_traceback()
- #
- space = maketestobjspace()
- w_format_traceback = space.wrap(interp2app(format_traceback))
- w_tb = space.appexec([w_format_traceback], """(format_traceback):
- def foo():
- return bar()
- def bar():
- return format_traceback()
- return foo()
- """)
- tb = space.str_w(w_tb)
- expected = '\n'.join([
- ' File "?", line 6, in anonymous', # this is the appexec code object
- ' File "?", line 3, in foo',
- ' File "?", line 5, in bar',
- ''
- ])
- assert tb == expected
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -49,8 +49,6 @@
'_current_frames' : 'currentframes._current_frames',
'setrecursionlimit' : 'vm.setrecursionlimit',
'getrecursionlimit' : 'vm.getrecursionlimit',
- 'pypy_set_track_resources' : 'vm.set_track_resources',
- 'pypy_get_track_resources' : 'vm.get_track_resources',
'setcheckinterval' : 'vm.setcheckinterval',
'getcheckinterval' : 'vm.getcheckinterval',
'exc_info' : 'vm.exc_info',
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -61,13 +61,6 @@
"""
return space.wrap(space.sys.recursionlimit)
- at unwrap_spec(flag=bool)
-def set_track_resources(space, flag):
- space.sys.track_resources = flag
-
-def get_track_resources(space):
- return space.wrap(space.sys.track_resources)
-
@unwrap_spec(interval=int)
def setcheckinterval(space, interval):
"""Tell the Python interpreter to check for asynchronous events every
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -434,5 +434,4 @@
FakeObjSpace.sys.filesystemencoding = 'foobar'
FakeObjSpace.sys.defaultencoding = 'ascii'
FakeObjSpace.sys.dlopenflags = 123
-FakeObjSpace.sys.track_resources = False
FakeObjSpace.builtin = FakeModule()
From pypy.commits at gmail.com Fri Aug 12 12:53:25 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 09:53:25 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Next emulation (we'll see how
far it makes sense to continue)
Message-ID: <57adff05.497bc20a.13214.f7a8@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86175:9dbb62851b3f
Date: 2016-08-12 18:52 +0200
http://bitbucket.org/pypy/pypy/changeset/9dbb62851b3f/
Log: Next emulation (we'll see how far it makes sense to continue)
diff --git a/rpython/rlib/revdb.py b/rpython/rlib/revdb.py
--- a/rpython/rlib/revdb.py
+++ b/rpython/rlib/revdb.py
@@ -236,3 +236,7 @@
def emulate_modf(x):
return (llop.revdb_modf(lltype.Float, x, 0),
llop.revdb_modf(lltype.Float, x, 1))
+
+def emulate_frexp(x):
+ return (llop.revdb_frexp(lltype.Float, x, 0),
+ int(llop.revdb_frexp(lltype.Float, x, 1)))
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -587,6 +587,7 @@
'revdb_strtod': LLOp(sideeffects=False),
'revdb_dtoa': LLOp(sideeffects=False),
'revdb_modf': LLOp(sideeffects=False),
+ 'revdb_frexp': LLOp(sideeffects=False),
}
# ***** Run test_lloperation after changes. *****
diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py
--- a/rpython/rtyper/lltypesystem/module/ll_math.py
+++ b/rpython/rtyper/lltypesystem/module/ll_math.py
@@ -185,6 +185,8 @@
mantissa = x
exponent = 0
else:
+ if revdb.flag_io_disabled():
+ return revdb.emulate_frexp(x)
exp_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
try:
mantissa = math_frexp(x, exp_p)
diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
--- a/rpython/translator/revdb/src-revdb/revdb_include.h
+++ b/rpython/translator/revdb/src-revdb/revdb_include.h
@@ -243,6 +243,13 @@
r = (index == 0) ? _r0 : _r1; \
} while (0)
+#define OP_REVDB_FREXP(x, index, r) \
+ do { \
+ double _r0; int _r1; \
+ _r0 = frexp(x, &_r1); \
+ r = (index == 0) ? _r0 : _r1; \
+ } while (0)
+
RPY_EXTERN void rpy_reverse_db_flush(void); /* must be called with the lock */
RPY_EXTERN void rpy_reverse_db_fetch(const char *file, int line);
diff --git a/rpython/translator/revdb/test/test_process.py b/rpython/translator/revdb/test/test_process.py
--- a/rpython/translator/revdb/test/test_process.py
+++ b/rpython/translator/revdb/test/test_process.py
@@ -52,6 +52,9 @@
valx, valy = math.modf(val)
revdb.send_output(rdtoa.dtoa(valx) + '\n')
revdb.send_output(rdtoa.dtoa(valy) + '\n')
+ xx, yy = math.frexp(val)
+ revdb.send_output(rdtoa.dtoa(xx) + '\n')
+ revdb.send_output('%d\n' % yy)
return
else:
assert False
@@ -210,4 +213,4 @@
group = ReplayProcessGroup(str(self.exename), self.rdbname)
with stdout_capture() as buf:
group.print_cmd('2.35')
- assert buf.getvalue() == "0.35\n2.0\n"
+ assert buf.getvalue() == "0.35\n2.0\n0.5875\n2\n"
From pypy.commits at gmail.com Fri Aug 12 12:59:04 2016
From: pypy.commits at gmail.com (rlamy)
Date: Fri, 12 Aug 2016 09:59:04 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Fix translation
Message-ID: <57ae0058.e129c20a.4ece9.f97a@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r86176:b1def8b63787
Date: 2016-08-12 17:58 +0100
http://bitbucket.org/pypy/pypy/changeset/b1def8b63787/
Log: Fix translation
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -267,7 +267,7 @@
space = self.space
if self.accept_str and space.isinstance_w(w_init, space.w_str):
# special case to optimize strings passed to a "char *" argument
- value = w_init.str_w(space)
+ value = space.bytes_w(w_init)
keepalives[i] = value
buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
rffi.cast(rffi.CCHARPP, cdata)[0] = buf
From pypy.commits at gmail.com Fri Aug 12 13:49:25 2016
From: pypy.commits at gmail.com (rlamy)
Date: Fri, 12 Aug 2016 10:49:25 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Fix bad change in b053ff5c2d6d
Message-ID: <57ae0c25.94a51c0a.a5438.4c86@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r86177:3a81057901f3
Date: 2016-08-12 18:48 +0100
http://bitbucket.org/pypy/pypy/changeset/3a81057901f3/
Log: Fix bad change in b053ff5c2d6d
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -1288,8 +1288,9 @@
cycle.append(candidate)
cycle.reverse()
names = [cls.getname(space) for cls in cycle]
- raise oefmt(space.w_TypeError,
- "cycle among base classes: %s", ' < '.join(names))
+ # Can't use oefmt() here, since names is a list of unicodes
+ raise OperationError(space.w_TypeError, space.newunicode(
+ u"cycle among base classes: " + u' < '.join(names)))
class TypeCache(SpaceCache):
From pypy.commits at gmail.com Fri Aug 12 15:00:19 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 12:00:19 -0700 (PDT)
Subject: [pypy-commit] pypy default: Test and probable fix (thanks sbauman)
Message-ID: <57ae1cc3.151a1c0a.a5160.638c@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86178:cd39a869a312
Date: 2016-08-12 20:59 +0200
http://bitbucket.org/pypy/pypy/changeset/cd39a869a312/
Log: Test and probable fix (thanks sbauman)
diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py
--- a/rpython/rlib/rweakref.py
+++ b/rpython/rlib/rweakref.py
@@ -142,7 +142,7 @@
def compute_result_annotation(self, s_keyclass, s_valueclass):
assert s_keyclass.is_constant()
- s_key = self.bookkeeper.immutablevalue(s_keyclass.const())
+ s_key = self.bookkeeper.valueoftype(s_keyclass.const)
return SomeWeakValueDict(
s_key,
_getclassdef(s_valueclass))
@@ -158,7 +158,7 @@
bk = self.bookkeeper
x = self.instance
return SomeWeakValueDict(
- bk.immutablevalue(x._keyclass()),
+ bk.valueoftype(x._keyclass),
bk.getuniqueclassdef(x._valueclass))
def _getclassdef(s_instance):
diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py
--- a/rpython/rlib/test/test_rweakvaldict.py
+++ b/rpython/rlib/test/test_rweakvaldict.py
@@ -180,3 +180,36 @@
RWeakValueDictionary(str, X).get("foobar")
RWeakValueDictionary(int, Y).get(42)
interpret(g, [])
+
+def test_key_instance():
+ class K(object):
+ pass
+ keys = [K(), K(), K()]
+
+ def g(d):
+ assert d.get(keys[3]) is None
+ x1 = X(); x2 = X(); x3 = X()
+ d.set(keys[0], x1)
+ d.set(keys[1], x2)
+ d.set(keys[2], x3)
+ assert d.get(keys[0]) is x1
+ assert d.get(keys[1]) is x2
+ assert d.get(keys[2]) is x3
+ assert d.get(keys[3]) is None
+ return x1, x3 # x2 dies
+ def f():
+ keys.append(K())
+ d = RWeakValueDictionary(K, X)
+ x1, x3 = g(d)
+ rgc.collect(); rgc.collect()
+ assert d.get(keys[0]) is x1
+ assert d.get(keys[1]) is None
+ assert d.get(keys[2]) is x3
+ assert d.get(keys[3]) is None
+ d.set(keys[0], None)
+ assert d.get(keys[0]) is None
+ assert d.get(keys[1]) is None
+ assert d.get(keys[2]) is x3
+ assert d.get(keys[3]) is None
+ f()
+ interpret(f, [])
From pypy.commits at gmail.com Fri Aug 12 15:27:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 12:27:39 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Install a minimal __import__
hook to use interactively. At least this
Message-ID: <57ae232b.11051c0a.eea9a.6834@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86179:423804b65c4c
Date: 2016-08-12 21:27 +0200
http://bitbucket.org/pypy/pypy/changeset/423804b65c4c/
Log: Install a minimal __import__ hook to use interactively. At least
this version doesn't try to look for a file in the current
directory, e.g. if we do "import math" instead of a package, which just
fails with "Attempted to do I/O".
diff --git a/pypy/interpreter/reverse_debugging.py b/pypy/interpreter/reverse_debugging.py
--- a/pypy/interpreter/reverse_debugging.py
+++ b/pypy/interpreter/reverse_debugging.py
@@ -321,18 +321,52 @@
revdb.send_output(s)
revdb.send_output("\n")
+ at gateway.unwrap_spec(name='str0', level=int)
+def revdb_importhook(space, name, w_globals=None,
+ w_locals=None, w_fromlist=None, level=-1):
+ # Incredibly simplified version of __import__, which only returns
+ # already-imported modules and doesn't call any custom import
+ # hooks. Recognizes only absolute imports. With a 'fromlist'
+ # argument that is a non-empty list, returns the module 'name3' if
+ # the 'name' argument is 'name1.name2.name3'. With an empty or
+ # None 'fromlist' argument, returns the module 'name1' instead.
+ return space.appexec([space.wrap(name), w_fromlist or space.w_None,
+ space.wrap(level), space.wrap(space.sys)],
+ """(name, fromlist, level, sys):
+ if level > 0:
+ raise ImportError("only absolute imports are "
+ "supported in the debugger")
+ basename = name.split('.')[0]
+ try:
+ basemod = sys.modules[basename]
+ mod = sys.modules[name]
+ except KeyError:
+ raise ImportError("'%s' not found or not imported yet "
+ "(the debugger can't import new modules, "
+ "and only supports absolute imports)" % (name,))
+ if fromlist:
+ return mod
+ return basemod
+ """)
+
@specialize.memo()
def get_revdb_displayhook(space):
return space.wrap(gateway.interp2app(revdb_displayhook))
+ at specialize.memo()
+def get_revdb_importhook(space):
+ return space.wrap(gateway.interp2app(revdb_importhook))
+
def prepare_print_environment(space):
assert not dbstate.standard_code
w_revdb_output = space.wrap(W_RevDBOutput(space))
w_displayhook = get_revdb_displayhook(space)
+ w_import = get_revdb_importhook(space)
space.sys.setdictvalue(space, 'stdout', w_revdb_output)
space.sys.setdictvalue(space, 'stderr', w_revdb_output)
space.sys.setdictvalue(space, 'displayhook', w_displayhook)
+ space.builtin.setdictvalue(space, '__import__', w_import)
def command_print(cmd, expression):
frame = fetch_cur_frame()
From pypy.commits at gmail.com Fri Aug 12 16:54:09 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 13:54:09 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Trying to support pressing
Ctrl-C to interrupt the current operation
Message-ID: <57ae3771.497bc20a.13214.4441@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86180:2311c3b8a675
Date: 2016-08-12 22:44 +0200
http://bitbucket.org/pypy/pypy/changeset/2311c3b8a675/
Log: Trying to support pressing Ctrl-C to interrupt the current operation
diff --git a/rpython/translator/revdb/interact.py b/rpython/translator/revdb/interact.py
--- a/rpython/translator/revdb/interact.py
+++ b/rpython/translator/revdb/interact.py
@@ -35,59 +35,79 @@
self.print_extra_pending_info = None
def interact(self):
- last_command = 'help'
- previous_time = None
- previous_thread = 0
+ self.last_command = 'help'
+ self.previous_time = None
+ self.previous_thread = 0
while True:
+ prompt = self.print_lines_before_prompt()
+ try:
+ while True:
+ cmdline = self.display_prompt(prompt)
+ self.run_command(cmdline)
+ prompt = self.print_lines_before_prompt()
+ except KeyboardInterrupt:
+ self.pgroup.recreate_subprocess(self.previous_time or 1)
+ self.last_command = ''
+ self.previous_thread = '?'
+ self.previous_time = '?'
+
+ def print_lines_before_prompt(self):
+ last_time = self.pgroup.get_current_time()
+ if last_time != self.previous_time:
+ print
+ if self.pgroup.get_current_thread() != self.previous_thread:
+ self.previous_thread = self.pgroup.get_current_thread()
+ if self.previous_thread == 0:
+ print ('-------------------- in main thread #0 '
+ '--------------------')
+ else:
+ print ('-------------------- in non-main thread '
+ '#%d --------------------' % (self.previous_thread,))
+ self.pgroup.update_watch_values()
last_time = self.pgroup.get_current_time()
- if last_time != previous_time:
- print
- if self.pgroup.get_current_thread() != previous_thread:
- previous_thread = self.pgroup.get_current_thread()
- if previous_thread == 0:
- print ('-------------------- in main thread #0 '
- '--------------------')
- else:
- print ('-------------------- in non-main thread '
- '#%d --------------------' % (previous_thread,))
- self.pgroup.update_watch_values()
- last_time = self.pgroup.get_current_time()
- if self.print_extra_pending_info:
- print self.print_extra_pending_info
- self.print_extra_pending_info = None
- if last_time != previous_time:
- self.pgroup.show_backtrace(complete=0)
- previous_time = last_time
+ if self.print_extra_pending_info:
+ print self.print_extra_pending_info
+ self.print_extra_pending_info = None
+ if last_time != self.previous_time:
+ self.pgroup.show_backtrace(complete=0)
+ self.previous_time = last_time
+ prompt = '(%d)$ ' % last_time
+ return prompt
- prompt = '(%d)$ ' % last_time
+ def display_prompt(self, prompt):
+ try:
+ cmdline = raw_input(prompt).strip()
+ except EOFError:
+ print
+ cmdline = 'quit'
+ if not cmdline:
+ cmdline = self.last_command
+ return cmdline
+
+ def run_command(self, cmdline):
+ match = r_cmdline.match(cmdline)
+ if not match:
+ return
+ self.last_command = cmdline
+ command, argument = match.groups()
+ try:
+ runner = getattr(self, 'command_' + command)
+ except AttributeError:
+ print >> sys.stderr, "no command '%s', try 'help'" % (command,)
+ else:
try:
- cmdline = raw_input(prompt).strip()
- except EOFError:
- print
- cmdline = 'quit'
- if not cmdline:
- cmdline = last_command
- match = r_cmdline.match(cmdline)
- if not match:
- continue
- last_command = cmdline
- command, argument = match.groups()
- try:
- runner = getattr(self, 'command_' + command)
- except AttributeError:
- print >> sys.stderr, "no command '%s', try 'help'" % (command,)
- else:
- try:
- runner(argument)
- except Exception as e:
- traceback.print_exc()
- print >> sys.stderr
- print >> sys.stderr, 'Something went wrong. You are now',
- print >> sys.stderr, 'in a pdb; press Ctrl-D to continue.'
- import pdb; pdb.post_mortem(sys.exc_info()[2])
- print >> sys.stderr
- print >> sys.stderr, 'You are back running %s.' % (
- sys.argv[0],)
+ runner(argument)
+ except KeyboardInterrupt:
+ raise
+ except Exception as e:
+ traceback.print_exc()
+ print >> sys.stderr
+ print >> sys.stderr, 'Something went wrong. You are now',
+ print >> sys.stderr, 'in a pdb; press Ctrl-D to continue.'
+ import pdb; pdb.post_mortem(sys.exc_info()[2])
+ print >> sys.stderr
+ print >> sys.stderr, 'You are back running %s.' % (
+ sys.argv[0],)
def command_help(self, argument):
"""Display commands summary"""
diff --git a/rpython/translator/revdb/process.py b/rpython/translator/revdb/process.py
--- a/rpython/translator/revdb/process.py
+++ b/rpython/translator/revdb/process.py
@@ -132,11 +132,11 @@
self.currently_created_objects = msg.arg2
self.current_thread = msg.arg3
- def clone(self):
+ def clone(self, activate=False):
"""Fork this subprocess. Returns a new ReplayProcess() that is
an identical copy.
"""
- self.send(Message(CMD_FORK))
+ self.send(Message(CMD_FORK, int(activate)))
s1, s2 = socket.socketpair()
ancillary.send_fds(self.control_socket.fileno(), [s2.fileno()])
s2.close()
@@ -459,7 +459,7 @@
clone_me = self.paused[from_time]
if self.active is not None:
self.active.close()
- self.active = clone_me.clone()
+ self.active = clone_me.clone(activate=True)
def jump_in_time(self, target_time):
"""Jump in time at the given 'target_time'.
@@ -561,11 +561,13 @@
self.active.send(Message(CMD_ATTACHID, nid, uid, int(watch_env)))
self.active.expect_ready()
- def recreate_subprocess(self):
- # recreate a subprocess at the current time
- time = self.get_current_time()
+ def recreate_subprocess(self, target_time=None):
+ # recreate a subprocess at the given time, or by default the
+ # current time
+ if target_time is None:
+ target_time = self.get_current_time()
self.active = None
- self.jump_in_time(time)
+ self.jump_in_time(target_time)
def print_cmd(self, expression, nids=[]):
"""Print an expression.
diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
--- a/rpython/translator/revdb/src-revdb/revdb.c
+++ b/rpython/translator/revdb/src-revdb/revdb.c
@@ -1249,7 +1249,7 @@
exit(0);
}
-static void command_fork(void)
+static void command_fork(int activate)
{
int child_sockfd;
int child_pid;
@@ -1272,6 +1272,11 @@
}
rpy_rev_sockfd = child_sockfd;
+ /* The 'activate' flag of CMD_FORK tells if the child process
+ must die or not when receiving SIGINT. Active children
+ die; inactive children (stored in 'pgroup.paused') don't. */
+ signal(SIGINT, activate ? SIG_DFL : SIG_IGN);
+
/* Close and re-open the revdb log file in the child process.
This is the simplest way I found to give 'rpy_rev_fileno'
its own offset, independent from the parent. It assumes
@@ -1422,7 +1427,7 @@
switch (cmd.cmd) {
case CMD_FORK:
- command_fork();
+ command_fork(cmd.arg1);
break;
case CMD_QUIT:
diff --git a/rpython/translator/revdb/test/ctrl_c.py b/rpython/translator/revdb/test/ctrl_c.py
new file mode 100644
--- /dev/null
+++ b/rpython/translator/revdb/test/ctrl_c.py
@@ -0,0 +1,43 @@
+import sys, os, thread, time, signal
+
+os.setpgid(0, 0)
+assert os.getpgrp() == os.getpid()
+
+
+sys.path[:] = sys.argv[1].split('\x7f')
+from rpython.translator.revdb.process import ReplayProcessGroup
+
+exename, rdbname = sys.argv[2:]
+group = ReplayProcessGroup(exename, rdbname)
+
+
+class MyInterrupt(Exception):
+ pass
+def my_signal(*args):
+ raise MyInterrupt
+prev_signal = signal.signal(signal.SIGINT, my_signal)
+
+def enable_timer():
+ def my_kill():
+ time.sleep(0.8)
+ print >> sys.stderr, "--<<< Sending CTRL-C >>>--"
+ os.killpg(os.getpid(), signal.SIGINT)
+ thread.start_new_thread(my_kill, ())
+
+all_ok = False
+try:
+ # this runs for ~9 seconds if uninterrupted
+ enable_timer()
+ group.print_cmd('very-long-loop')
+except MyInterrupt:
+ print >> sys.stderr, "very-long-loop interrupted, trying again"
+ group.recreate_subprocess(1)
+ try:
+ enable_timer()
+ group.print_cmd('very-long-loop')
+ except MyInterrupt:
+ print >> sys.stderr, "second interruption ok"
+ all_ok = True
+
+assert all_ok, "expected very-long-loop to be killed by SIGINT"
+print "all ok"
diff --git a/rpython/translator/revdb/test/test_process.py b/rpython/translator/revdb/test/test_process.py
--- a/rpython/translator/revdb/test/test_process.py
+++ b/rpython/translator/revdb/test/test_process.py
@@ -1,4 +1,4 @@
-import py, sys, math
+import py, sys, math, os, subprocess, time
from cStringIO import StringIO
from rpython.rlib import revdb, rdtoa
from rpython.rlib.debug import debug_print, ll_assert
@@ -56,6 +56,14 @@
revdb.send_output(rdtoa.dtoa(xx) + '\n')
revdb.send_output('%d\n' % yy)
return
+ elif extra == 'very-long-loop':
+ i = 0
+ total = 0
+ while i < 2000000000:
+ total += revdb.flag_io_disabled()
+ i += 1
+ revdb.send_output(str(total))
+ return
else:
assert False
uid = revdb.get_unique_id(stuff)
@@ -214,3 +222,16 @@
with stdout_capture() as buf:
group.print_cmd('2.35')
assert buf.getvalue() == "0.35\n2.0\n0.5875\n2\n"
+
+ def test_ctrl_c(self):
+ localdir = os.path.dirname(__file__)
+ args = [sys.executable, os.path.join(localdir, 'ctrl_c.py'),
+ '\x7f'.join(sys.path),
+ str(self.exename), self.rdbname]
+ t1 = time.time()
+ result = subprocess.check_output(args)
+ t2 = time.time()
+ print 'subprocess returned with captured stdout:\n%r' % (result,)
+ assert result == 'all ok\n'
+ # should take two times ~0.8 seconds if correctly interrupted
+ assert t2 - t1 < 3.0
From pypy.commits at gmail.com Fri Aug 12 16:54:11 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 13:54:11 -0700 (PDT)
Subject: [pypy-commit] pypy reverse-debugger: Print something here
Message-ID: <57ae3773.81cb1c0a.93e5e.8134@mx.google.com>
Author: Armin Rigo
Branch: reverse-debugger
Changeset: r86181:c431addcce4e
Date: 2016-08-12 22:53 +0200
http://bitbucket.org/pypy/pypy/changeset/c431addcce4e/
Log: Print something here
diff --git a/rpython/translator/revdb/interact.py b/rpython/translator/revdb/interact.py
--- a/rpython/translator/revdb/interact.py
+++ b/rpython/translator/revdb/interact.py
@@ -46,7 +46,11 @@
self.run_command(cmdline)
prompt = self.print_lines_before_prompt()
except KeyboardInterrupt:
- self.pgroup.recreate_subprocess(self.previous_time or 1)
+ rtime = self.previous_time or 1
+ print
+ print 'KeyboardInterrupt: restoring state at time %d...' % (
+ rtime,)
+ self.pgroup.recreate_subprocess(rtime)
self.last_command = ''
self.previous_thread = '?'
self.previous_time = '?'
From pypy.commits at gmail.com Sat Aug 13 02:24:44 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 12 Aug 2016 23:24:44 -0700 (PDT)
Subject: [pypy-commit] pypy default: For now,
we can't specify both ``-O1`` and ``--platform=arm``: the first
Message-ID: <57aebd2c.28eac20a.ba98b.c3cc@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86182:31c39adab9d0
Date: 2016-08-13 08:24 +0200
http://bitbucket.org/pypy/pypy/changeset/31c39adab9d0/
Log: For now, we can't specify both ``-O1`` and ``--platform=arm``: the
first option picks ``--gc=boehm`` and the second option picks
``--gcrootfinder=shadowstack``, which are incompatible.
diff --git a/rpython/doc/arm.rst b/rpython/doc/arm.rst
--- a/rpython/doc/arm.rst
+++ b/rpython/doc/arm.rst
@@ -148,7 +148,7 @@
::
- pypy ~/path_to_pypy_checkout/rpython/bin/rpython -O1 --platform=arm target.py
+ pypy ~/path_to_pypy_checkout/rpython/bin/rpython -O2 --platform=arm target.py
If everything worked correctly this should yield an ARM binary. Running this binary in the ARM chroot or on an ARM device should produce the output ``"Hello World"``.
From pypy.commits at gmail.com Sat Aug 13 03:20:14 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 13 Aug 2016 00:20:14 -0700 (PDT)
Subject: [pypy-commit] pypy default: Document two branches
Message-ID: <57aeca2e.8628c20a.17598.d0f8@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86183:888cb191638e
Date: 2016-08-13 09:19 +0200
http://bitbucket.org/pypy/pypy/changeset/888cb191638e/
Log: Document two branches
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -128,3 +128,13 @@
.. branch: cpyext-realloc
Implement PyObject_Realloc
+
+.. branch: inline-blocks
+
+Improve a little bit the readability of the generated C code
+
+.. branch: improve-vmprof-testing
+
+Improved vmprof support: now tries hard to not miss any Python-level
+frame in the captured stacks, even if there is the metainterp or
+blackhole interp involved. Also fix the stacklet (greenlet) support.
From pypy.commits at gmail.com Sat Aug 13 13:55:37 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Sat, 13 Aug 2016 10:55:37 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Change python environment in conftest to
python3.5
Message-ID: <57af5f19.c1e31c0a.3cb07.d707@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r86184:6d502390ba4f
Date: 2016-08-13 19:54 +0200
http://bitbucket.org/pypy/pypy/changeset/6d502390ba4f/
Log: Change python environment in conftest to python3.5
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -5,7 +5,7 @@
# some tests fail otherwise
sys.setrecursionlimit(2000)
-PYTHON3 = os.getenv('PYTHON3') or py.path.local.sysfind('python3')
+PYTHON3 = os.getenv('PYTHON3') or py.path.local.sysfind('python3.5')
if PYTHON3 is not None:
PYTHON3 = str(PYTHON3)
From pypy.commits at gmail.com Sat Aug 13 15:00:52 2016
From: pypy.commits at gmail.com (mattip)
Date: Sat, 13 Aug 2016 12:00:52 -0700 (PDT)
Subject: [pypy-commit] pypy default: define _GNU_SOURCE before any other
includes
Message-ID: <57af6e64.c75dc20a.853ec.b6f2@mx.google.com>
Author: Matti Picus
Branch:
Changeset: r86185:d468895a18b8
Date: 2016-08-13 21:59 +0300
http://bitbucket.org/pypy/pypy/changeset/d468895a18b8/
Log: define _GNU_SOURCE before any other includes
diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h
--- a/pypy/module/cpyext/include/Python.h
+++ b/pypy/module/cpyext/include/Python.h
@@ -2,6 +2,9 @@
#define Py_PYTHON_H
/* Compat stuff */
+#ifdef __GNUC__
+#define _GNU_SOURCE 1
+#endif
#ifndef _WIN32
# include
# include
@@ -52,7 +55,6 @@
#ifndef DL_IMPORT
# define DL_IMPORT(RTYPE) RTYPE
#endif
-
#include
#ifndef _WIN32
From pypy.commits at gmail.com Sun Aug 14 03:11:28 2016
From: pypy.commits at gmail.com (arigo)
Date: Sun, 14 Aug 2016 00:11:28 -0700 (PDT)
Subject: [pypy-commit] pypy default: Document py2-mappingproxy
Message-ID: <57b019a0.28eac20a.ba98b.4f8f@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86186:91db1a9b7bfd
Date: 2016-08-14 09:10 +0200
http://bitbucket.org/pypy/pypy/changeset/91db1a9b7bfd/
Log: Document py2-mappingproxy
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -138,3 +138,9 @@
Improved vmprof support: now tries hard to not miss any Python-level
frame in the captured stacks, even if there is the metainterp or
blackhole interp involved. Also fix the stacklet (greenlet) support.
+
+.. branch: py2-mappingproxy
+
+``type.__dict__`` now returns a ``dict_proxy`` object, like on CPython.
+Previously it returned what looked like a regular dict object (but it
+was already read-only).
From pypy.commits at gmail.com Sun Aug 14 10:17:13 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Sun, 14 Aug 2016 07:17:13 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Undo change of conftest (load python3
again), needs more fixes later
Message-ID: <57b07d69.2916c20a.26156.ceb4@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r86187:d564da75d49d
Date: 2016-08-14 16:16 +0200
http://bitbucket.org/pypy/pypy/changeset/d564da75d49d/
Log: Undo change of conftest (load python3 again), needs more fixes later
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -5,7 +5,7 @@
# some tests fail otherwise
sys.setrecursionlimit(2000)
-PYTHON3 = os.getenv('PYTHON3') or py.path.local.sysfind('python3.5')
+PYTHON3 = os.getenv('PYTHON3') or py.path.local.sysfind('python3')
if PYTHON3 is not None:
PYTHON3 = str(PYTHON3)
From pypy.commits at gmail.com Sun Aug 14 16:12:14 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 14 Aug 2016 13:12:14 -0700 (PDT)
Subject: [pypy-commit] pypy memoryview-attributes: test showing current
memoryview shortcomings
Message-ID: <57b0d09e.c186c20a.6dddb.3330@mx.google.com>
Author: Matti Picus
Branch: memoryview-attributes
Changeset: r86188:56370389b4ea
Date: 2016-08-14 21:17 +0300
http://bitbucket.org/pypy/pypy/changeset/56370389b4ea/
Log: test showing current memoryview shortcomings
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -3633,6 +3633,14 @@
#assert a.base is data.__buffer__
assert a.tostring() == 'abc'
+ def test_memoryview(self):
+ import numpy as np
+ x = np.array([1, 2, 3, 4, 5], dtype='i')
+ y = memoryview('abc')
+ assert y.format == 'B'
+ y = memoryview(x)
+ assert y.format == 'i'
+
def test_fromstring(self):
import sys
from numpy import fromstring, dtype
From pypy.commits at gmail.com Sun Aug 14 16:12:17 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 14 Aug 2016 13:12:17 -0700 (PDT)
Subject: [pypy-commit] pypy memoryview-attributes: add more passing tests
for memoryview attributes
Message-ID: <57b0d0a1.c75dc20a.853ec.3835@mx.google.com>
Author: Matti Picus
Branch: memoryview-attributes
Changeset: r86190:ac8183b0c6c2
Date: 2016-08-14 22:08 +0300
http://bitbucket.org/pypy/pypy/changeset/ac8183b0c6c2/
Log: add more passing tests for memoryview attributes
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -3635,11 +3635,24 @@
def test_memoryview(self):
import numpy as np
+ import sys
+ if sys.version_info[:2] > (3, 2):
+ # In Python 3.3 the representation of empty shape, strides and sub-offsets
+ # is an empty tuple instead of None.
+ # http://docs.python.org/dev/whatsnew/3.3.html#api-changes
+ EMPTY = ()
+ else:
+ EMPTY = None
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview('abc')
assert y.format == 'B'
y = memoryview(x)
assert y.format == 'i'
+ assert y.shape == (5,)
+ assert y.ndim == 1
+ assert y.strides == (4,)
+ assert y.suboffsets == EMPTY
+ assert y.itemsize == 4
def test_fromstring(self):
import sys
From pypy.commits at gmail.com Sun Aug 14 16:12:16 2016
From: pypy.commits at gmail.com (mattip)
Date: Sun, 14 Aug 2016 13:12:16 -0700 (PDT)
Subject: [pypy-commit] pypy memoryview-attributes: move stride, format, ndim,
itemsize, shape down to Buffer, override in ArrayBuffer
Message-ID: <57b0d0a0.82ddc20a.bde06.2948@mx.google.com>
Author: Matti Picus
Branch: memoryview-attributes
Changeset: r86189:0fcf7070dd43
Date: 2016-08-14 22:07 +0300
http://bitbucket.org/pypy/pypy/changeset/0fcf7070dd43/
Log: move stride, format, ndim, itemsize, shape down to Buffer, override
in ArrayBuffer
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -704,3 +704,20 @@
def get_raw_address(self):
from rpython.rtyper.lltypesystem import rffi
return rffi.ptradd(self.impl.storage, self.impl.start)
+
+ def getformat(self):
+ return self.impl.dtype.char
+
+ def getitemsize(self):
+ return self.impl.dtype.elsize
+
+ def getndim(self):
+ return len(self.impl.shape)
+
+ def getshape(self):
+ return self.impl.shape
+
+ def getstrides(self):
+ return self.impl.strides
+
+
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
--- a/pypy/objspace/std/memoryobject.py
+++ b/pypy/objspace/std/memoryobject.py
@@ -100,22 +100,22 @@
return space.wrap(self.buf.getlength())
def w_get_format(self, space):
- return space.wrap("B")
+ return space.wrap(self.buf.getformat())
def w_get_itemsize(self, space):
- return space.wrap(1)
+ return space.wrap(self.buf.getitemsize())
def w_get_ndim(self, space):
- return space.wrap(1)
+ return space.wrap(self.buf.getndim())
def w_is_readonly(self, space):
return space.wrap(self.buf.readonly)
def w_get_shape(self, space):
- return space.newtuple([space.wrap(self.getlength())])
+ return space.newtuple([space.wrap(x) for x in self.buf.getshape()])
def w_get_strides(self, space):
- return space.newtuple([space.wrap(1)])
+ return space.newtuple([space.wrap(x) for x in self.buf.getstrides()])
def w_get_suboffsets(self, space):
# I've never seen anyone filling this field
diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py
--- a/rpython/rlib/buffer.py
+++ b/rpython/rlib/buffer.py
@@ -59,6 +59,20 @@
def get_raw_address(self):
raise ValueError("no raw buffer")
+ def getformat(self):
+ return 'B'
+
+ def getitemsize(self):
+ return 1
+
+ def getndim(self):
+ return 1
+
+ def getshape(self):
+ return [self.getlength()]
+
+ def getstrides(self):
+ return [1]
class StringBuffer(Buffer):
__slots__ = ['value']
From pypy.commits at gmail.com Mon Aug 15 05:17:02 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 15 Aug 2016 02:17:02 -0700 (PDT)
Subject: [pypy-commit] cffi default: Give an error when subtracting two
pointers and the division's result is
Message-ID: <57b1888e.09afc20a.90904.e4f7@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r2737:763505916552
Date: 2016-08-15 11:03 +0200
http://bitbucket.org/cffi/cffi/changeset/763505916552/
Log: Give an error when subtracting two pointers and the division's
result is not exact (in gcc, we get nonsense, so it means it is
undefined behavior for C, which is best handled by raising in cffi)
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c
--- a/c/_cffi_backend.c
+++ b/c/_cffi_backend.c
@@ -2355,8 +2355,16 @@
return NULL;
}
itemsize = ct->ct_itemdescr->ct_size;
- if (itemsize <= 0) itemsize = 1;
- diff = (cdv->c_data - cdw->c_data) / itemsize;
+ diff = cdv->c_data - cdw->c_data;
+ if (itemsize > 1) {
+ if (diff % itemsize) {
+ PyErr_SetString(PyExc_ValueError,
+ "pointer subtraction: the distance between the two "
+ "pointers is not a multiple of the item size");
+ return NULL;
+ }
+ diff = diff / itemsize;
+ }
#if PY_MAJOR_VERSION < 3
return PyInt_FromSsize_t(diff);
#else
diff --git a/c/test_c.py b/c/test_c.py
--- a/c/test_c.py
+++ b/c/test_c.py
@@ -587,6 +587,19 @@
e = py.test.raises(TypeError, "q - a")
assert str(e.value) == "cannot subtract cdata 'short *' and cdata 'int *'"
+def test_ptr_sub_unaligned():
+ BInt = new_primitive_type("int")
+ BIntPtr = new_pointer_type(BInt)
+ a = cast(BIntPtr, 1240)
+ for bi in range(1430, 1438):
+ b = cast(BIntPtr, bi)
+ if ((bi - 1240) % size_of_int()) == 0:
+ assert b - a == (bi - 1240) // size_of_int()
+ assert a - b == (1240 - bi) // size_of_int()
+ else:
+ py.test.raises(ValueError, "b - a")
+ py.test.raises(ValueError, "a - b")
+
def test_cast_primitive_from_cdata():
p = new_primitive_type("int")
n = cast(p, cast(p, -42))
From pypy.commits at gmail.com Mon Aug 15 05:17:36 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 15 Aug 2016 02:17:36 -0700 (PDT)
Subject: [pypy-commit] pypy default: cffi/763505916552: Give an error when
subtracting two pointers and the
Message-ID: <57b188b0.47cbc20a.b54cb.e2be@mx.google.com>
Author: Armin Rigo
Branch:
Changeset: r86195:b3b9882a5be7
Date: 2016-08-15 11:07 +0200
http://bitbucket.org/pypy/pypy/changeset/b3b9882a5be7/
Log: cffi/763505916552: Give an error when subtracting two pointers and
the division's result is not exact (in gcc, we get nonsense, so it
means it is undefined behavior for C, which is best handled by
raising in cffi)
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -310,11 +310,15 @@
self.ctype.name, ct.name)
#
itemsize = ct.ctitem.size
- if itemsize <= 0:
- itemsize = 1
with self as ptr1, w_other as ptr2:
diff = (rffi.cast(lltype.Signed, ptr1) -
- rffi.cast(lltype.Signed, ptr2)) // itemsize
+ rffi.cast(lltype.Signed, ptr2))
+ if itemsize > 1:
+ if diff % itemsize:
+ raise oefmt(space.w_ValueError,
+ "pointer subtraction: the distance between the two "
+ "pointers is not a multiple of the item size")
+ diff //= itemsize
return space.wrap(diff)
#
return self._add_or_sub(w_other, -1)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -576,6 +576,19 @@
e = py.test.raises(TypeError, "q - a")
assert str(e.value) == "cannot subtract cdata 'short *' and cdata 'int *'"
+def test_ptr_sub_unaligned():
+ BInt = new_primitive_type("int")
+ BIntPtr = new_pointer_type(BInt)
+ a = cast(BIntPtr, 1240)
+ for bi in range(1430, 1438):
+ b = cast(BIntPtr, bi)
+ if ((bi - 1240) % size_of_int()) == 0:
+ assert b - a == (bi - 1240) // size_of_int()
+ assert a - b == (1240 - bi) // size_of_int()
+ else:
+ py.test.raises(ValueError, "b - a")
+ py.test.raises(ValueError, "a - b")
+
def test_cast_primitive_from_cdata():
p = new_primitive_type("int")
n = cast(p, cast(p, -42))
From pypy.commits at gmail.com Mon Aug 15 06:17:37 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Mon, 15 Aug 2016 03:17:37 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Merge with py3k
Message-ID: <57b196c1.8bc71c0a.8c8f4.45b9@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r86196:360908aa059a
Date: 2016-08-15 12:16 +0200
http://bitbucket.org/pypy/pypy/changeset/360908aa059a/
Log: Merge with py3k
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1801,40 +1801,6 @@
_warnings.warn(msg, warningcls, stacklevel=stacklevel)
""")
- def resource_warning(self, w_msg, w_tb):
- self.appexec([w_msg, w_tb],
- """(msg, tb):
- import sys
- print >> sys.stderr, msg
- if tb:
- print >> sys.stderr, "Created at (most recent call last):"
- print >> sys.stderr, tb
- """)
-
- def format_traceback(self):
- # we need to disable track_resources before calling the traceback
- # module. Else, it tries to open more files to format the traceback,
- # the file constructor will call space.format_traceback etc., in an
- # inifite recursion
- flag = self.sys.track_resources
- self.sys.track_resources = False
- try:
- return self.appexec([],
- """():
- import sys, traceback
- # the "1" is because we don't want to show THIS code
- # object in the traceback
- try:
- f = sys._getframe(1)
- except ValueError:
- # this happens if you call format_traceback at the very beginning
- # of startup, when there is no bottom code object
- return ''
- return "".join(traceback.format_stack(f))
- """)
- finally:
- self.sys.track_resources = flag
-
class AppExecCache(SpaceCache):
def build(cache, source):
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -209,14 +209,6 @@
self.check(['-c', 'pass'], {'PYTHONNOUSERSITE': '1'}, sys_argv=['-c'],
run_command='pass', **expected)
- def test_track_resources(self, monkeypatch):
- myflag = [False]
- def pypy_set_track_resources(flag):
- myflag[0] = flag
- monkeypatch.setattr(sys, 'pypy_set_track_resources', pypy_set_track_resources, raising=False)
- self.check(['-X', 'track-resources'], {}, sys_argv=[''], run_stdin=True)
- assert myflag[0] == True
-
class TestInteraction:
"""
These tests require pexpect (UNIX-only).
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -134,7 +134,7 @@
assert self.space.lookup(w_instance, "gobbledygook") is None
w_instance = self.space.appexec([], """():
class Lookup(object):
- "bla"
+ "bla"
return Lookup()""")
assert self.space.str_w(self.space.lookup(w_instance, "__doc__")) == "bla"
@@ -148,7 +148,7 @@
assert is_callable(w_func)
w_lambda_func = self.space.appexec([], "(): return lambda: True")
assert is_callable(w_lambda_func)
-
+
w_instance = self.space.appexec([], """():
class Call(object):
def __call__(self): pass
@@ -308,7 +308,7 @@
def test_call_obj_args(self):
from pypy.interpreter.argument import Arguments
-
+
space = self.space
w_f = space.appexec([], """():
@@ -333,7 +333,7 @@
assert w_x is w_9
assert w_y is w_1
- w_res = space.call_obj_args(w_a, w_9, Arguments(space, []))
+ w_res = space.call_obj_args(w_a, w_9, Arguments(space, []))
assert w_res is w_9
def test_compare_by_iteration(self):
@@ -383,7 +383,7 @@
assert not space.isabstractmethod_w(space.getattr(w_B, space.wrap('g')))
assert not space.isabstractmethod_w(space.getattr(w_B, space.wrap('h')))
-class TestModuleMinimal:
+class TestModuleMinimal:
def test_sys_exists(self):
assert self.space.sys
@@ -458,28 +458,3 @@
space.finish()
# assert that we reach this point without getting interrupted
# by the OperationError(NameError)
-
- def test_format_traceback(self):
- from pypy.tool.pytest.objspace import maketestobjspace
- from pypy.interpreter.gateway import interp2app
- #
- def format_traceback(space):
- return space.format_traceback()
- #
- space = maketestobjspace()
- w_format_traceback = space.wrap(interp2app(format_traceback))
- w_tb = space.appexec([w_format_traceback], """(format_traceback):
- def foo():
- return bar()
- def bar():
- return format_traceback()
- return foo()
- """)
- tb = space.str_w(w_tb)
- expected = '\n'.join([
- ' File "?", line 6, in anonymous', # this is the appexec code object
- ' File "?", line 3, in foo',
- ' File "?", line 5, in bar',
- ''
- ])
- assert tb == expected
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -267,7 +267,7 @@
space = self.space
if self.accept_str and space.isinstance_w(w_init, space.w_str):
# special case to optimize strings passed to a "char *" argument
- value = w_init.str_w(space)
+ value = space.bytes_w(w_init)
keepalives[i] = value
buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
rffi.cast(rffi.CCHARPP, cdata)[0] = buf
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -49,8 +49,6 @@
'_current_frames' : 'currentframes._current_frames',
'setrecursionlimit' : 'vm.setrecursionlimit',
'getrecursionlimit' : 'vm.getrecursionlimit',
- 'pypy_set_track_resources' : 'vm.set_track_resources',
- 'pypy_get_track_resources' : 'vm.get_track_resources',
'setcheckinterval' : 'vm.setcheckinterval',
'getcheckinterval' : 'vm.getcheckinterval',
'exc_info' : 'vm.exc_info',
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -61,13 +61,6 @@
"""
return space.wrap(space.sys.recursionlimit)
- at unwrap_spec(flag=bool)
-def set_track_resources(space, flag):
- space.sys.track_resources = flag
-
-def get_track_resources(space):
- return space.wrap(space.sys.track_resources)
-
@unwrap_spec(interval=int)
def setcheckinterval(space, interval):
"""Tell the Python interpreter to check for asynchronous events every
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -434,5 +434,4 @@
FakeObjSpace.sys.filesystemencoding = 'foobar'
FakeObjSpace.sys.defaultencoding = 'ascii'
FakeObjSpace.sys.dlopenflags = 123
-FakeObjSpace.sys.track_resources = False
FakeObjSpace.builtin = FakeModule()
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -1288,8 +1288,9 @@
cycle.append(candidate)
cycle.reverse()
names = [cls.getname(space) for cls in cycle]
- raise oefmt(space.w_TypeError,
- "cycle among base classes: %s", ' < '.join(names))
+ # Can't use oefmt() here, since names is a list of unicodes
+ raise OperationError(space.w_TypeError, space.newunicode(
+ u"cycle among base classes: " + u' < '.join(names)))
class TypeCache(SpaceCache):
From pypy.commits at gmail.com Mon Aug 15 09:54:03 2016
From: pypy.commits at gmail.com (vext01)
Date: Mon, 15 Aug 2016 06:54:03 -0700 (PDT)
Subject: [pypy-commit] pypy w-xor-x: Only allocate writable pages,
and supply functions to change protection mask.
Message-ID: <57b1c97b.c19d1c0a.2bf58.9911@mx.google.com>
Author: Edd Barrett
Branch: w-xor-x
Changeset: r86197:e32e8a566374
Date: 2016-08-15 14:51 +0100
http://bitbucket.org/pypy/pypy/changeset/e32e8a566374/
Log: Only allocate writable pages, and supply functions to change
protection mask.
diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py
--- a/rpython/rlib/rmmap.py
+++ b/rpython/rlib/rmmap.py
@@ -155,6 +155,8 @@
c_mmap, c_mmap_safe = external('mmap', [PTR, size_t, rffi.INT, rffi.INT,
rffi.INT, off_t], PTR, macro=True,
save_err_on_unsafe=rffi.RFFI_SAVE_ERRNO)
+ c_mprotect, _ = external('mprotect',
+ [PTR, size_t, rffi.INT], rffi.INT)
# 'mmap' on linux32 is a macro that calls 'mmap64'
_, c_munmap_safe = external('munmap', [PTR, size_t], rffi.INT)
c_msync, _ = external('msync', [PTR, size_t, rffi.INT], rffi.INT,
@@ -707,12 +709,22 @@
def alloc_hinted(hintp, map_size):
flags = MAP_PRIVATE | MAP_ANONYMOUS
- prot = PROT_EXEC | PROT_READ | PROT_WRITE
+ prot = PROT_READ | PROT_WRITE
if we_are_translated():
flags = NonConstant(flags)
prot = NonConstant(prot)
return c_mmap_safe(hintp, map_size, prot, flags, -1, 0)
+ def set_pages_executable(addr, size):
+ rv = c_mprotect(addr, size, PROT_EXEC | PROT_READ)
+ if rv < 0:
+ debug.fatalerror_notb("set_pages_executable failed")
+
+ def set_pages_writable(addr, size):
+ rv = c_mprotect(addr, size, PROT_WRITE | PROT_READ)
+ if rv < 0:
+ debug.fatalerror_notb("set_pages_executable failed")
+
def clear_large_memory_chunk_aligned(addr, map_size):
addr = rffi.cast(PTR, addr)
flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS
@@ -951,6 +963,9 @@
return res
alloc._annenforceargs_ = (int,)
+ def set_pages_executable(addr, size):
+ pass # XXX not implemented on windows
+
def free(ptr, map_size):
VirtualFree_safe(ptr, 0, MEM_RELEASE)
From pypy.commits at gmail.com Mon Aug 15 09:54:05 2016
From: pypy.commits at gmail.com (vext01)
Date: Mon, 15 Aug 2016 06:54:05 -0700 (PDT)
Subject: [pypy-commit] pypy w-xor-x: Make cpu_info() W^X compliant.
Message-ID: <57b1c97d.d4e41c0a.5dcb6.8fa1@mx.google.com>
Author: Edd Barrett
Branch: w-xor-x
Changeset: r86198:228237a37ee6
Date: 2016-08-15 14:52 +0100
http://bitbucket.org/pypy/pypy/changeset/228237a37ee6/
Log: Make cpu_info() W^X compliant.
diff --git a/rpython/jit/backend/x86/detect_feature.py b/rpython/jit/backend/x86/detect_feature.py
--- a/rpython/jit/backend/x86/detect_feature.py
+++ b/rpython/jit/backend/x86/detect_feature.py
@@ -1,17 +1,20 @@
import sys
import struct
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib.rmmap import alloc, free
+from rpython.rlib.rmmap import alloc, free, set_pages_executable
+
+CPU_INFO_SZ = 4096
def cpu_info(instr):
- data = alloc(4096)
+ data = alloc(CPU_INFO_SZ)
pos = 0
for c in instr:
data[pos] = c
pos += 1
+ set_pages_executable(data, CPU_INFO_SZ)
fnptr = rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), data)
code = fnptr()
- free(data, 4096)
+ free(data, CPU_INFO_SZ)
return code
def detect_sse2():
From pypy.commits at gmail.com Mon Aug 15 10:37:31 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 15 Aug 2016 07:37:31 -0700 (PDT)
Subject: [pypy-commit] pypy default: Add cffi requirement for 'own' tests
Message-ID: <57b1d3ab.d8011c0a.a8158.a347@mx.google.com>
Author: Ronan Lamy
Branch:
Changeset: r86199:2f57d12b8365
Date: 2016-08-15 15:36 +0100
http://bitbucket.org/pypy/pypy/changeset/2f57d12b8365/
Log: Add cffi requirement for 'own' tests
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,5 @@
+cffi>=1.4.0
+
# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
From pypy.commits at gmail.com Mon Aug 15 11:43:46 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 15 Aug 2016 08:43:46 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: hg merge default
Message-ID: <57b1e332.c4ebc20a.96bf2.738d@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r86200:f2a780e47063
Date: 2016-08-15 16:43 +0100
http://bitbucket.org/pypy/pypy/changeset/f2a780e47063/
Log: hg merge default
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -128,3 +128,19 @@
.. branch: cpyext-realloc
Implement PyObject_Realloc
+
+.. branch: inline-blocks
+
+Improve a little bit the readability of the generated C code
+
+.. branch: improve-vmprof-testing
+
+Improved vmprof support: now tries hard to not miss any Python-level
+frame in the captured stacks, even if there is the metainterp or
+blackhole interp involved. Also fix the stacklet (greenlet) support.
+
+.. branch: py2-mappingproxy
+
+``type.__dict__`` now returns a ``dict_proxy`` object, like on CPython.
+Previously it returned what looked like a regular dict object (but it
+was already read-only).
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -310,11 +310,15 @@
self.ctype.name, ct.name)
#
itemsize = ct.ctitem.size
- if itemsize <= 0:
- itemsize = 1
with self as ptr1, w_other as ptr2:
diff = (rffi.cast(lltype.Signed, ptr1) -
- rffi.cast(lltype.Signed, ptr2)) // itemsize
+ rffi.cast(lltype.Signed, ptr2))
+ if itemsize > 1:
+ if diff % itemsize:
+ raise oefmt(space.w_ValueError,
+ "pointer subtraction: the distance between the two "
+ "pointers is not a multiple of the item size")
+ diff //= itemsize
return space.wrap(diff)
#
return self._add_or_sub(w_other, -1)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -576,6 +576,19 @@
e = py.test.raises(TypeError, "q - a")
assert str(e.value) == "cannot subtract cdata 'short *' and cdata 'int *'"
+def test_ptr_sub_unaligned():
+ BInt = new_primitive_type("int")
+ BIntPtr = new_pointer_type(BInt)
+ a = cast(BIntPtr, 1240)
+ for bi in range(1430, 1438):
+ b = cast(BIntPtr, bi)
+ if ((bi - 1240) % size_of_int()) == 0:
+ assert b - a == (bi - 1240) // size_of_int()
+ assert a - b == (1240 - bi) // size_of_int()
+ else:
+ py.test.raises(ValueError, "b - a")
+ py.test.raises(ValueError, "a - b")
+
def test_cast_primitive_from_cdata():
p = new_primitive_type("int")
n = cast(p, cast(p, -42))
diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h
--- a/pypy/module/cpyext/include/Python.h
+++ b/pypy/module/cpyext/include/Python.h
@@ -2,6 +2,9 @@
#define Py_PYTHON_H
/* Compat stuff */
+#ifdef __GNUC__
+#define _GNU_SOURCE 1
+#endif
#ifndef _WIN32
# include
# include
@@ -52,7 +55,6 @@
#ifndef DL_IMPORT
# define DL_IMPORT(RTYPE) RTYPE
#endif
-
#include
#ifndef _WIN32
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,5 @@
+cffi>=1.4.0
+
# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
diff --git a/rpython/doc/arm.rst b/rpython/doc/arm.rst
--- a/rpython/doc/arm.rst
+++ b/rpython/doc/arm.rst
@@ -148,7 +148,7 @@
::
- pypy ~/path_to_pypy_checkout/rpython/bin/rpython -O1 --platform=arm target.py
+ pypy ~/path_to_pypy_checkout/rpython/bin/rpython -O2 --platform=arm target.py
If everything worked correctly this should yield an ARM binary. Running this binary in the ARM chroot or on an ARM device should produce the output ``"Hello World"``.
diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py
--- a/rpython/rlib/rweakref.py
+++ b/rpython/rlib/rweakref.py
@@ -142,7 +142,7 @@
def compute_result_annotation(self, s_keyclass, s_valueclass):
assert s_keyclass.is_constant()
- s_key = self.bookkeeper.immutablevalue(s_keyclass.const())
+ s_key = self.bookkeeper.valueoftype(s_keyclass.const)
return SomeWeakValueDict(
s_key,
_getclassdef(s_valueclass))
@@ -158,7 +158,7 @@
bk = self.bookkeeper
x = self.instance
return SomeWeakValueDict(
- bk.immutablevalue(x._keyclass()),
+ bk.valueoftype(x._keyclass),
bk.getuniqueclassdef(x._valueclass))
def _getclassdef(s_instance):
diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py
--- a/rpython/rlib/test/test_rweakvaldict.py
+++ b/rpython/rlib/test/test_rweakvaldict.py
@@ -180,3 +180,36 @@
RWeakValueDictionary(str, X).get("foobar")
RWeakValueDictionary(int, Y).get(42)
interpret(g, [])
+
+def test_key_instance():
+ class K(object):
+ pass
+ keys = [K(), K(), K()]
+
+ def g(d):
+ assert d.get(keys[3]) is None
+ x1 = X(); x2 = X(); x3 = X()
+ d.set(keys[0], x1)
+ d.set(keys[1], x2)
+ d.set(keys[2], x3)
+ assert d.get(keys[0]) is x1
+ assert d.get(keys[1]) is x2
+ assert d.get(keys[2]) is x3
+ assert d.get(keys[3]) is None
+ return x1, x3 # x2 dies
+ def f():
+ keys.append(K())
+ d = RWeakValueDictionary(K, X)
+ x1, x3 = g(d)
+ rgc.collect(); rgc.collect()
+ assert d.get(keys[0]) is x1
+ assert d.get(keys[1]) is None
+ assert d.get(keys[2]) is x3
+ assert d.get(keys[3]) is None
+ d.set(keys[0], None)
+ assert d.get(keys[0]) is None
+ assert d.get(keys[1]) is None
+ assert d.get(keys[2]) is x3
+ assert d.get(keys[3]) is None
+ f()
+ interpret(f, [])
From pypy.commits at gmail.com Mon Aug 15 12:01:35 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 15 Aug 2016 09:01:35 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: The CFFI + RevDB talk abstract
Message-ID: <57b1e75f.c186c20a.6dddb.841e@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r5667:ac0a0e384922
Date: 2016-08-15 18:01 +0200
http://bitbucket.org/pypy/extradoc/changeset/ac0a0e384922/
Log: The CFFI + RevDB talk abstract
diff --git a/talk/pyconza2016/cffi-revdb.rst b/talk/pyconza2016/cffi-revdb.rst
new file mode 100644
--- /dev/null
+++ b/talk/pyconza2016/cffi-revdb.rst
@@ -0,0 +1,33 @@
+
+CFFI, calling C // RevDB, a new debugger
+========================================
+
+
+Abstract
+--------
+
+Two different topics:
+
+* CFFI: a simple way to call C code from your Python programs;
+
+* RevDB: an experimental "reverse debugger" for Python.
+
+The two topics have in common their existence thanks to PyPy, an
+alternative Python implementation in Python. Both are interesting
+even if you are only using the regular CPython.
+
+*CFFI* is an alternative to using the standard CPython C API to extend
+Python (or other tools like Cython, SWIG or ctypes). It was
+originally inspired by LuaJIT's FFI. Like Cython, you declare C
+functions and compile that with a regular C compiler. Unlike Cython,
+there is no special language: you manipulate C data structures and
+call C functions straight from Python. I will show examples of how
+simple it is to call existing C code with CFFI.
+
+*RevDB* is a reverse debugger for Python, similar to UndoDB-GDB or LL
+for C. You run your program once, in "record" mode; then you start
+the reverse-debugger on the log file. It gives a pdb-like experience,
+but it is replaying your program exactly as it ran---and moreover you
+can now go backward as well as forward in time. You also get
+"watchpoints", which are very useful to find when things change. I
+will show how it works on small examples.
From pypy.commits at gmail.com Mon Aug 15 12:31:11 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Mon, 15 Aug 2016 09:31:11 -0700 (PDT)
Subject: [pypy-commit] pypy default: properly close category even if we
don't have debug_prints enabled, to support
Message-ID: <57b1ee4f.a710c20a.cc582.9794@mx.google.com>
Author: Carl Friedrich Bolz
Branch:
Changeset: r86201:6a6545b4a915
Date: 2016-08-15 18:30 +0200
http://bitbucket.org/pypy/pypy/changeset/6a6545b4a915/
Log: properly close category even if we don't have debug_prints enabled,
to support running with PYPYLOG=-
diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py
--- a/rpython/jit/metainterp/logger.py
+++ b/rpython/jit/metainterp/logger.py
@@ -98,6 +98,7 @@
def log_abort_loop(self, trace, memo=None):
debug_start("jit-abort-log")
if not have_debug_prints():
+ debug_stop("jit-abort-log")
return
inputargs, operations = self._unpack_trace(trace)
logops = self._log_operations(inputargs, operations, ops_offset=None,
From pypy.commits at gmail.com Mon Aug 15 13:01:31 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 15 Aug 2016 10:01:31 -0700 (PDT)
Subject: [pypy-commit] pypy rpython-deque: Building a subset of deques in
RPython: really just lists, but
Message-ID: <57b1f56b.45d11c0a.31711.d222@mx.google.com>
Author: Armin Rigo
Branch: rpython-deque
Changeset: r86202:718e6e3e8ece
Date: 2016-08-14 09:13 +0200
http://bitbucket.org/pypy/pypy/changeset/718e6e3e8ece/
Log: Building a subset of deques in RPython: really just lists, but with
a "gap" at the start which allows O(1) implementation of a few
operations like 'lst.insert(0, x)' and 'del lst[:n]'.
From pypy.commits at gmail.com Mon Aug 15 13:01:33 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 15 Aug 2016 10:01:33 -0700 (PDT)
Subject: [pypy-commit] pypy rpython-deque: Annotation
Message-ID: <57b1f56d.898b1c0a.724cf.d481@mx.google.com>
Author: Armin Rigo
Branch: rpython-deque
Changeset: r86203:63bf58058a3c
Date: 2016-08-14 09:27 +0200
http://bitbucket.org/pypy/pypy/changeset/63bf58058a3c/
Log: Annotation
diff --git a/rpython/annotator/listdef.py b/rpython/annotator/listdef.py
--- a/rpython/annotator/listdef.py
+++ b/rpython/annotator/listdef.py
@@ -12,6 +12,7 @@
class ListItem(object):
mutated = False # True for lists mutated after creation
resized = False # True for lists resized after creation
+ deque_hinted = False # True for 'list_implemented_as_deque()'
range_step = None # the step -- only for lists only created by a range()
dont_change_any_more = False # set to True when too late for changes
immutable = False # for getattr out of _immutable_fields_ = ['attr[*]']
@@ -49,6 +50,13 @@
raise ListChangeUnallowed("resizing list")
self.resized = True
+ def deque_hint(self):
+ if not self.deque_hinted:
+ if self.dont_change_any_more:
+ raise TooLateForChange
+ self.resize()
+ self.deque_hinted = True
+
def setrangestep(self, step):
if step != self.range_step:
if self.dont_change_any_more:
@@ -79,6 +87,8 @@
self.mutate()
if other.resized:
self.resize()
+ if other.deque_hinted:
+ self.deque_hint()
if other.range_step != self.range_step:
self.setrangestep(self._step_map[type(self.range_step),
type(other.range_step)])
@@ -173,9 +183,10 @@
self.listitem.merge(newlistitem)
def __repr__(self):
- return '<[%r]%s%s%s%s>' % (self.listitem.s_value,
+ return '<[%r]%s%s%s%s%s>' % (self.listitem.s_value,
self.listitem.mutated and 'm' or '',
self.listitem.resized and 'r' or '',
+ self.listitem.deque_hinted and 'd' or '',
self.listitem.immutable and 'I' or '',
self.listitem.must_not_resize and '!R' or '')
@@ -186,6 +197,10 @@
self.listitem.mutate()
self.listitem.resize()
+ def deque_hint(self):
+ self.resize()
+ self.listitem.deque_hint()
+
def never_resize(self):
if self.listitem.resized:
raise ListChangeUnallowed("list already resized")
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4631,6 +4631,20 @@
assert ('string formatting requires a constant string/unicode'
in str(e.value))
+ def test_list_implemented_as_deque(self):
+ def f(x):
+ if x > 5:
+ l = []
+ else:
+ l = []
+ objectmodel.list_implemented_as_deque(l)
+ return l
+ a = self.RPythonAnnotator()
+ s = a.build_types(f, [int])
+ assert s.listdef.listitem.mutated
+ assert s.listdef.listitem.resized
+ assert s.listdef.listitem.deque_hinted
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py
--- a/rpython/rlib/objectmodel.py
+++ b/rpython/rlib/objectmodel.py
@@ -431,6 +431,27 @@
hop.exception_is_here()
hop.gendirectcall(r_list.LIST._ll_resize_hint, v_list, v_sizehint)
+def list_implemented_as_deque(l):
+ """Hint that the list 'l' should use an implementation that allows
+ a "gap" at the start, which allows O(1) implementation of a few
+ operations like 'lst.insert(0, x)' and 'del lst[:n]'.
+ """
+
+class Entry(ExtRegistryEntry):
+ _about_ = list_implemented_as_deque
+
+ def compute_result_annotation(self, s_l):
+ from rpython.annotator import model as annmodel
+ if annmodel.s_None.contains(s_l):
+ return # first argument is only None so far, but we
+ # expect a generalization later
+ if not isinstance(s_l, annmodel.SomeList):
+ raise annmodel.AnnotatorError("Argument must be a list")
+ s_l.listdef.deque_hint()
+
+ def specialize_call(self, hop):
+ hop.exception_cannot_occur()
+
# ____________________________________________________________
#
# id-like functions. The idea is that calling hash() or id() is not
From pypy.commits at gmail.com Mon Aug 15 13:01:36 2016
From: pypy.commits at gmail.com (arigo)
Date: Mon, 15 Aug 2016 10:01:36 -0700 (PDT)
Subject: [pypy-commit] pypy rpython-deque: Add everywhere items+start
instead of only items for all lists
Message-ID: <57b1f570.a710c20a.cc582.a1c0@mx.google.com>
Author: Armin Rigo
Branch: rpython-deque
Changeset: r86204:dcf07c3d78cf
Date: 2016-08-15 19:00 +0200
http://bitbucket.org/pypy/pypy/changeset/dcf07c3d78cf/
Log: Add everywhere items+start instead of only items for all lists
diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py
--- a/rpython/rtyper/lltypesystem/rdict.py
+++ b/rpython/rtyper/lltypesystem/rdict.py
@@ -839,7 +839,8 @@
res = LIST.ll_newlist(dic.num_items)
entries = dic.entries
dlen = len(entries)
- items = res.ll_items()
+ items, start = res.ll_items_start()
+ assert start == 0
i = 0
p = 0
while i < dlen:
diff --git a/rpython/rtyper/lltypesystem/rlist.py b/rpython/rtyper/lltypesystem/rlist.py
--- a/rpython/rtyper/lltypesystem/rlist.py
+++ b/rpython/rtyper/lltypesystem/rlist.py
@@ -17,6 +17,7 @@
# Concrete implementation of RPython lists:
#
# struct list {
+# int start; // optional, default to 0
# int length;
# items_array *items;
# }
@@ -67,7 +68,7 @@
"ll_newlist": ll_fixed_newlist,
"ll_newemptylist": ll_fixed_newemptylist,
"ll_length": ll_fixed_length,
- "ll_items": ll_fixed_items,
+ "ll_items_start": ll_fixed_items_start,
"ITEM": ITEM,
"ll_getitem_fast": ll_fixed_getitem_fast,
"ll_setitem_fast": ll_fixed_setitem_fast,
@@ -94,6 +95,11 @@
ITEM = self.item_repr.lowleveltype
ITEMARRAY = self.get_itemarray_lowleveltype()
# XXX we might think of turning length stuff into Unsigned
+ extra = []
+ _ll_items_start = ll_items_0
+ if getattr(self.listitem, 'deque_hinted', False):
+ extra = [("start", Signed)]
+ _ll_items_start = ll_items_start
self.LIST.become(GcStruct("list", ("length", Signed),
("items", Ptr(ITEMARRAY)),
adtmeths = ADTIList({
@@ -101,7 +107,7 @@
"ll_newlist_hint": ll_newlist_hint,
"ll_newemptylist": ll_newemptylist,
"ll_length": ll_length,
- "ll_items": ll_items,
+ "ll_items_start": _ll_items_start,
"ITEM": ITEM,
"ll_getitem_fast": ll_getitem_fast,
"ll_setitem_fast": ll_setitem_fast,
@@ -347,17 +353,22 @@
return l.length
ll_length.oopspec = 'list.len(l)'
-def ll_items(l):
- return l.items
+def ll_items_0(l):
+ return l.items, 0
+
+def ll_items_start(l):
+ return l.items, l.start
def ll_getitem_fast(l, index):
ll_assert(index < l.length, "getitem out of bounds")
- return l.ll_items()[index]
+ items, start = l.ll_items_start()
+ return items[start + index]
ll_getitem_fast.oopspec = 'list.getitem(l, index)'
def ll_setitem_fast(l, index, item):
ll_assert(index < l.length, "setitem out of bounds")
- l.ll_items()[index] = item
+ items, start = l.ll_items_start()
+ items[start + index] = item
ll_setitem_fast.oopspec = 'list.setitem(l, index, item)'
# fixed size versions
@@ -377,8 +388,8 @@
return len(l)
ll_fixed_length.oopspec = 'list.len(l)'
-def ll_fixed_items(l):
- return l
+def ll_fixed_items_start(l):
+ return l, 0
def ll_fixed_getitem_fast(l, index):
ll_assert(index < len(l), "fixed getitem out of bounds")
diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py
--- a/rpython/rtyper/lltypesystem/rordereddict.py
+++ b/rpython/rtyper/lltypesystem/rordereddict.py
@@ -1236,7 +1236,8 @@
res = LIST.ll_newlist(dic.num_live_items)
entries = dic.entries
dlen = dic.num_ever_used_items
- items = res.ll_items()
+ items, start = res.ll_items_start()
+ assert start == 0
i = 0
p = 0
while i < dlen:
diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py
--- a/rpython/rtyper/lltypesystem/rstr.py
+++ b/rpython/rtyper/lltypesystem/rstr.py
@@ -182,11 +182,18 @@
# where NULL is always valid: it is chr(0)
- def _list_length_items(self, hop, v_lst, LIST):
+ def _list_length_items_start(self, hop, v_lst, LIST):
LIST = LIST.TO
v_length = hop.gendirectcall(LIST.ll_length, v_lst)
- v_items = hop.gendirectcall(LIST.ll_items, v_lst)
- return v_length, v_items
+ v_items = hop.gendirectcall(_ll_items_of, v_lst)
+ v_start = hop.gendirectcall(_ll_start_of, v_lst)
+ return v_length, v_items, v_start
+
+def _ll_items_of(l):
+ return l.ll_items_start()[0]
+def _ll_start_of(l):
+ return l.ll_items_start()[1]
+
class StringRepr(BaseLLStringRepr, AbstractStringRepr):
lowleveltype = Ptr(STR)
@@ -507,7 +514,7 @@
return result
@staticmethod
- def ll_join(s, length, items):
+ def ll_join(s, length, items, start):
s_chars = s.chars
s_len = len(s_chars)
num_items = length
@@ -517,7 +524,7 @@
i = 0
while i < num_items:
try:
- itemslen = ovfcheck(itemslen + len(items[i].chars))
+ itemslen = ovfcheck(itemslen + len(items[start + i].chars))
except OverflowError:
raise MemoryError
i += 1
@@ -528,14 +535,14 @@
# a single '+' at the end is allowed to overflow: it gets
# a negative result, and the gc will complain
result = s.malloc(itemslen + seplen)
- res_index = len(items[0].chars)
- s.copy_contents(items[0], result, 0, 0, res_index)
+ res_index = len(items[start].chars)
+ s.copy_contents(items[start], result, 0, 0, res_index)
i = 1
while i < num_items:
s.copy_contents(s, result, 0, res_index, s_len)
res_index += s_len
- lgt = len(items[i].chars)
- s.copy_contents(items[i], result, 0, res_index, lgt)
+ lgt = len(items[start + i].chars)
+ s.copy_contents(items[start + i], result, 0, res_index, lgt)
res_index += lgt
i += 1
return result
@@ -811,20 +818,20 @@
return count
@staticmethod
- @signature(types.int(), types.any(), returns=types.any())
- @jit.look_inside_iff(lambda length, items: jit.loop_unrolling_heuristic(
- items, length))
- def ll_join_strs(length, items):
+ @signature(types.int(), types.any(), types.int(), returns=types.any())
+ @jit.look_inside_iff(lambda length, items, start:
+ jit.loop_unrolling_heuristic(items, length))
+ def ll_join_strs(length, items, start):
# Special case for length 1 items, helps both the JIT and other code
if length == 1:
- return items[0]
+ return items[start]
num_items = length
itemslen = 0
i = 0
while i < num_items:
try:
- itemslen = ovfcheck(itemslen + len(items[i].chars))
+ itemslen = ovfcheck(itemslen + len(items[start + i].chars))
except OverflowError:
raise MemoryError
i += 1
@@ -838,18 +845,17 @@
res_index = 0
i = 0
while i < num_items:
- item_chars = items[i].chars
+ item_chars = items[start + i].chars
item_len = len(item_chars)
- copy_contents(items[i], result, 0, res_index, item_len)
+ copy_contents(items[start + i], result, 0, res_index, item_len)
res_index += item_len
i += 1
return result
@staticmethod
- @jit.look_inside_iff(lambda length, chars, RES: jit.isconstant(length) and jit.isvirtual(chars))
- def ll_join_chars(length, chars, RES):
- # no need to optimize this, will be replaced by string builder
- # at some point soon
+ @jit.look_inside_iff(lambda length, chars, start, RES:
+ jit.isconstant(length) and jit.isvirtual(chars))
+ def ll_join_chars(length, chars, start, RES):
num_chars = length
if RES is StringRepr.lowleveltype:
target = Char
@@ -861,7 +867,7 @@
res_chars = result.chars
i = 0
while i < num_chars:
- res_chars[i] = cast_primitive(target, chars[i])
+ res_chars[i] = cast_primitive(target, chars[start + i])
i += 1
return result
@@ -918,7 +924,8 @@
break
i += 1
res = LIST.ll_newlist(count)
- items = res.ll_items()
+ items, start = res.ll_items_start()
+ assert start == 0
i = 0
j = 0
resindex = 0
@@ -951,7 +958,8 @@
pos = s.find(c, pos + markerlen, last)
count += 1
res = LIST.ll_newlist(count)
- items = res.ll_items()
+ items, start = res.ll_items_start()
+ assert start == 0
pos = 0
count = 0
pos = s.find(c, 0, last)
@@ -985,7 +993,8 @@
break
i += 1
res = LIST.ll_newlist(count)
- items = res.ll_items()
+ items, start = res.ll_items_start()
+ assert start == 0
i = strlen
j = strlen
resindex = count - 1
@@ -1018,7 +1027,8 @@
pos = s.rfind(c, 0, pos - markerlen)
count += 1
res = LIST.ll_newlist(count)
- items = res.ll_items()
+ items, start = res.ll_items_start()
+ assert start == 0
pos = 0
pos = len(s.chars)
prev_pos = pos
@@ -1133,7 +1143,7 @@
@staticmethod
def ll_build_finish(builder):
- return LLHelpers.ll_join_strs(len(builder), builder)
+ return LLHelpers.ll_join_strs(len(builder), builder, 0)
@staticmethod
@specialize.memo()
@@ -1210,14 +1220,16 @@
hop.genop('setarrayitem', [vtemp, i, vchunk])
hop.exception_cannot_occur() # to ignore the ZeroDivisionError of '%'
- return hop.gendirectcall(cls.ll_join_strs, size, vtemp)
+ c_zero = inputconst(Signed, 0)
+ return hop.gendirectcall(cls.ll_join_strs, size, vtemp, c_zero)
@staticmethod
@jit.dont_look_inside
def ll_string2list(RESLIST, src):
length = len(src.chars)
lst = RESLIST.ll_newlist(length)
- dst = lst.ll_items()
+ dst, start = lst.ll_items_start()
+ assert start == 0
SRC = typeOf(src).TO # STR or UNICODE
DST = typeOf(dst).TO # GcArray
assert DST.OF is SRC.chars.OF
diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py
--- a/rpython/rtyper/rlist.py
+++ b/rpython/rtyper/rlist.py
@@ -555,8 +555,10 @@
def ll_arraycopy(source, dest, source_start, dest_start, length):
SRCTYPE = typeOf(source)
# lltype
- rgc.ll_arraycopy(source.ll_items(), dest.ll_items(),
- source_start, dest_start, length)
+ srcitems, srcstart = source.ll_items_start()
+ dstitems, dststart = dest.ll_items_start()
+ rgc.ll_arraycopy(srcitems, dstitems,
+ srcstart + source_start, dststart + dest_start, length)
def ll_copy(RESLIST, l):
diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py
--- a/rpython/rtyper/rstr.py
+++ b/rpython/rtyper/rstr.py
@@ -203,7 +203,7 @@
hop.exception_cannot_occur()
return hop.gendirectcall(self.ll.ll_isalnum, v_str)
- def _list_length_items(self, hop, v_lst, LIST):
+ def _list_length_items_start(self, hop, v_lst, LIST):
"""Return two Variables containing the length and items of a
list. Need to be overriden because it is typesystem-specific."""
raise NotImplementedError
@@ -219,7 +219,8 @@
if not isinstance(r_lst, BaseListRepr):
raise TyperError("string.join of non-list: %r" % r_lst)
v_str, v_lst = hop.inputargs(rstr.repr, r_lst)
- v_length, v_items = self._list_length_items(hop, v_lst, r_lst.lowleveltype)
+ v_length, v_items, v_start = self._list_length_items_start(hop, v_lst,
+ r_lst.lowleveltype)
if hop.args_s[0].is_constant() and hop.args_s[0].const == '':
if r_lst.item_repr == rstr.repr:
@@ -228,16 +229,16 @@
r_lst.item_repr == unichar_repr):
v_tp = hop.inputconst(Void, self.lowleveltype)
return hop.gendirectcall(self.ll.ll_join_chars, v_length,
- v_items, v_tp)
+ v_items, v_start, v_tp)
else:
raise TyperError("''.join() of non-string list: %r" % r_lst)
- return hop.gendirectcall(llfn, v_length, v_items)
+ return hop.gendirectcall(llfn, v_length, v_items, v_start)
else:
if r_lst.item_repr == rstr.repr:
llfn = self.ll.ll_join
else:
raise TyperError("sep.join() of non-string list: %r" % r_lst)
- return hop.gendirectcall(llfn, v_str, v_length, v_items)
+ return hop.gendirectcall(llfn, v_str, v_length, v_items, v_start)
def rtype_method_splitlines(self, hop):
rstr = hop.args_r[0].repr
diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py
--- a/rpython/rtyper/test/test_llinterp.py
+++ b/rpython/rtyper/test/test_llinterp.py
@@ -207,9 +207,11 @@
def f():
return [1,2,3]
res = interpret(f,[])
- assert len(res.ll_items()) == len([1,2,3])
+ items, start = res.ll_items_start()
+ assert start == 0
+ assert len(items) == len([1,2,3])
for i in range(3):
- assert res.ll_items()[i] == i+1
+ assert items[i] == i+1
def test_list_itemops():
def f(i):
@@ -250,10 +252,12 @@
l.reverse()
return l
res = interpret(f,[])
- assert len(res.ll_items()) == len([3,2,1])
+ items, start = res.ll_items_start()
+ assert start == 0
+ assert len(items) == len([3,2,1])
print res
for i in range(3):
- assert res.ll_items()[i] == 3-i
+ assert items[i] == 3-i
def test_list_pop():
def f():
@@ -263,7 +267,9 @@
l3 = l.pop(-1)
return [l1,l2,l3]
res = interpret(f,[])
- assert len(res.ll_items()) == 3
+ items, start = res.ll_items_start()
+ assert start == 0
+ assert len(items) == 3
def test_ovf():
def f(x):
diff --git a/rpython/rtyper/test/tool.py b/rpython/rtyper/test/tool.py
--- a/rpython/rtyper/test/tool.py
+++ b/rpython/rtyper/test/tool.py
@@ -77,8 +77,8 @@
@staticmethod
def ll_to_list(l):
r = []
- items = l.ll_items()
- for i in range(l.ll_length()):
+ items, start = l.ll_items_start()
+ for i in range(start, start + l.ll_length()):
r.append(items[i])
return r
From pypy.commits at gmail.com Mon Aug 15 16:13:01 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Mon, 15 Aug 2016 13:13:01 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Add asyncio test for asynchronous context
managers
Message-ID: <57b2224d.531d1c0a.9874.14ea@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5
Changeset: r86205:90eef5573de8
Date: 2016-08-15 22:11 +0200
http://bitbucket.org/pypy/pypy/changeset/90eef5573de8/
Log: Add asyncio test for asynchronous context managers
diff --git a/pypy/module/_asyncio/test/test_asyncio.py b/pypy/module/_asyncio/test/test_asyncio.py
--- a/pypy/module/_asyncio/test/test_asyncio.py
+++ b/pypy/module/_asyncio/test/test_asyncio.py
@@ -18,3 +18,31 @@
loop.run_until_complete(f())
print("done with async loop")
"""
+
+ def test_asynchronous_context_managers(self):
+ """
+import encodings.idna
+import asyncio
+
+class Corotest(object):
+ def __init__(self):
+ self.res = "-"
+
+ async def coro(self, name, lock):
+ self.res += ' coro {}: waiting for lock -'.format(name)
+ async with lock:
+ self.res += ' coro {}: holding the lock -'.format(name)
+ await asyncio.sleep(1)
+ self.res += ' coro {}: releasing the lock -'.format(name)
+
+cor = Corotest()
+loop = asyncio.get_event_loop()
+lock = asyncio.Lock()
+coros = asyncio.gather(cor.coro(1, lock), cor.coro(2, lock))
+try:
+ loop.run_until_complete(coros)
+finally:
+ loop.close()
+
+assert cor.res == "- coro 1: waiting for lock - coro 1: holding the lock - coro 2: waiting for lock - coro 1: releasing the lock - coro 2: holding the lock - coro 2: releasing the lock -"
+ """
From pypy.commits at gmail.com Mon Aug 15 19:44:01 2016
From: pypy.commits at gmail.com (rlamy)
Date: Mon, 15 Aug 2016 16:44:01 -0700 (PDT)
Subject: [pypy-commit] pypy py3k: Add usemodules declarations that py3k
seems to require
Message-ID: <57b253c1.c62f1c0a.ea45f.4036@mx.google.com>
Author: Ronan Lamy
Branch: py3k
Changeset: r86206:958c3593f88a
Date: 2016-08-16 00:43 +0100
http://bitbucket.org/pypy/pypy/changeset/958c3593f88a/
Log: Add usemodules declarations that py3k seems to require
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -79,7 +79,7 @@
class AppTestRecompiler:
- spaceconfig = dict(usemodules=['_cffi_backend', 'imp'])
+ spaceconfig = dict(usemodules=['_cffi_backend', 'imp', 'cpyext', 'struct'])
def setup_class(cls):
if cls.runappdirect:
From pypy.commits at gmail.com Mon Aug 15 20:13:59 2016
From: pypy.commits at gmail.com (wlav)
Date: Mon, 15 Aug 2016 17:13:59 -0700 (PDT)
Subject: [pypy-commit] pypy cling-support: from Aditi: resolve std::string
ctor overload with length
Message-ID: <57b25ac7.11331c0a.388e3.87ef@mx.google.com>
Author: Wim Lavrijsen
Branch: cling-support
Changeset: r86207:eb960a97982c
Date: 2016-08-15 17:03 -0700
http://bitbucket.org/pypy/pypy/changeset/eb960a97982c/
Log: from Aditi: resolve std::string ctor overload with length
diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -17,10 +17,11 @@
pass
# overload priorities: lower is preferred
-priority = { 'void*' : 100,
- 'void**' : 100,
- 'float' : 30,
- 'double' : 10, }
+priority = { 'void*' : 100,
+ 'void**' : 100,
+ 'float' : 30,
+ 'double' : 10,
+ 'const string&' : 1, } # solves a specific string ctor overload
from rpython.rlib.listsort import make_timsort_class
CPPMethodBaseTimSort = make_timsort_class()
From pypy.commits at gmail.com Mon Aug 15 20:14:01 2016
From: pypy.commits at gmail.com (wlav)
Date: Mon, 15 Aug 2016 17:14:01 -0700 (PDT)
Subject: [pypy-commit] pypy cling-support: from Aditi (modified): allow
passing of strings with \0 in them
Message-ID: <57b25ac9.031dc20a.de9d1.14a4@mx.google.com>
Author: Wim Lavrijsen
Branch: cling-support
Changeset: r86208:7f130da8dc67
Date: 2016-08-15 17:09 -0700
http://bitbucket.org/pypy/pypy/changeset/7f130da8dc67/
Log: from Aditi (modified): allow passing of strings with \0 in them
diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py
--- a/pypy/module/cppyy/capi/builtin_capi.py
+++ b/pypy/module/cppyy/capi/builtin_capi.py
@@ -158,11 +158,15 @@
return _c_call_r(cppmethod, cppobject, nargs, args)
_c_call_s = rffi.llexternal(
"cppyy_call_s",
- [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP,
+ [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, rffi.INTP], rffi.CCHARP,
releasegil=ts_call,
compilation_info=backend.eci)
def c_call_s(space, cppmethod, cppobject, nargs, args):
- return _c_call_s(cppmethod, cppobject, nargs, args)
+ length = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
+ cstr = _c_call_s(cppmethod, cppobject, nargs, args, length)
+ cstr_len = int(length[0])
+ lltype.free(length, flavor='raw')
+ return cstr, cstr_len
_c_constructor = rffi.llexternal(
"cppyy_constructor",
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -146,7 +146,8 @@
'call_d' : ([c_method, c_object, c_int, c_voidp], c_double),
'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp),
- 'call_s' : ([c_method, c_object, c_int, c_voidp], c_ccharp),
+ # call_s actually takes an intp as last parameter, but this will do
+ 'call_s' : ([c_method, c_object, c_int, c_voidp, c_voidp], c_ccharp),
'constructor' : ([c_method, c_object, c_int, c_voidp], c_object),
'call_o' : ([c_method, c_object, c_int, c_voidp, c_type], c_object),
@@ -336,8 +337,12 @@
args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)]
return _cdata_to_ptr(space, call_capi(space, 'call_r', args))
def c_call_s(space, cppmethod, cppobject, nargs, cargs):
- args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)]
- return call_capi(space, 'call_s', args)
+ length = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
+ args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(vp=length)]
+ cstr = call_capi(space, 'call_s', args)
+ cstr_len = int(length[0])
+ lltype.free(length, flavor='raw')
+ return cstr, cstr_len
def c_constructor(space, cppmethod, cppobject, nargs, cargs):
args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)]
diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py
--- a/pypy/module/cppyy/executor.py
+++ b/pypy/module/cppyy/executor.py
@@ -195,8 +195,10 @@
class StdStringExecutor(InstancePtrExecutor):
def execute(self, space, cppmethod, cppthis, num_args, args):
- cstr_result = capi.c_call_s(space, cppmethod, cppthis, num_args, args)
- return space.wrap(capi.charp2str_free(space, cstr_result))
+ cstr, cstr_len = capi.c_call_s(space, cppmethod, cppthis, num_args, args)
+ string = rffi.charpsize2str(cstr, cstr_len)
+ capi.c_free(rffi.cast(rffi.VOIDP, cstr))
+ return space.wrap(string)
def execute_libffi(self, space, cif_descr, funcaddr, buffer):
from pypy.module.cppyy.interp_cppyy import FastCallNotPossible
diff --git a/pypy/module/cppyy/include/capi.h b/pypy/module/cppyy/include/capi.h
--- a/pypy/module/cppyy/include/capi.h
+++ b/pypy/module/cppyy/include/capi.h
@@ -59,7 +59,7 @@
RPY_EXTERN
void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args);
RPY_EXTERN
- char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args);
+ char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args, int* length);
RPY_EXTERN
cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t klass, int nargs, void* args);
diff --git a/pypy/module/cppyy/include/cpp_cppyy.h b/pypy/module/cppyy/include/cpp_cppyy.h
--- a/pypy/module/cppyy/include/cpp_cppyy.h
+++ b/pypy/module/cppyy/include/cpp_cppyy.h
@@ -62,7 +62,7 @@
Double_t CallD( TCppMethod_t method, TCppObject_t self, void* args );
LongDouble_t CallLD( TCppMethod_t method, TCppObject_t self, void* args );
void* CallR( TCppMethod_t method, TCppObject_t self, void* args );
- Char_t* CallS( TCppMethod_t method, TCppObject_t self, void* args );
+ Char_t* CallS( TCppMethod_t method, TCppObject_t self, void* args, int* length );
TCppObject_t CallConstructor( TCppMethod_t method, TCppType_t type, void* args );
void CallDestructor( TCppType_t type, TCppObject_t self );
TCppObject_t CallO( TCppMethod_t method, TCppObject_t self, void* args, TCppType_t result_type );
diff --git a/pypy/module/cppyy/src/clingcwrapper.cxx b/pypy/module/cppyy/src/clingcwrapper.cxx
--- a/pypy/module/cppyy/src/clingcwrapper.cxx
+++ b/pypy/module/cppyy/src/clingcwrapper.cxx
@@ -113,10 +113,10 @@
}
static inline
-char* cppstring_to_cstring( const std::string& name ) {
- char* name_char = (char*)malloc(name.size() + 1 );
- strcpy( name_char, name.c_str() );
- return name_char;
+char* cppstring_to_cstring( const std::string& cppstr ) {
+ char* cstr = (char*)malloc( cppstr.size() + 1 );
+ memcpy( cstr, cppstr.c_str(), cppstr.size() + 1 );
+ return cstr;
}
@@ -478,12 +478,20 @@
return nullptr;
}
-Char_t* Cppyy::CallS( TCppMethod_t method, TCppObject_t self, void* args )
+Char_t* Cppyy::CallS(
+ TCppMethod_t method, TCppObject_t self, void* args, int* length )
{
- Char_t* s = nullptr;
- if ( FastCall( method, args, (void*)self, &s ) )
- return s;
- return nullptr;
+ char* cstr = nullptr;
+ TClassRef cr("std::string");
+ std::string* cppresult = (std::string*)malloc( sizeof(std::string) );
+ if ( FastCall( method, args, self, (void*)cppresult ) ) {
+ cstr = cppstring_to_cstring( *cppresult );
+ *length = cppresult->size();
+ cppresult->std::string::~string();
+ } else
+ *length = 0;
+ free( (void*)cppresult );
+ return cstr;
}
Cppyy::TCppObject_t Cppyy::CallConstructor(
@@ -1188,20 +1196,20 @@
return (double)Cppyy::CallD(method, (void*)self, &parvec);
}
-void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) {
+void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) {
std::vector parvec = vsargs_to_parvec(args, nargs);
return (void*)Cppyy::CallR(method, (void*)self, &parvec);
}
-char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) {
+char* cppyy_call_s(
+ cppyy_method_t method, cppyy_object_t self, int nargs, void* args, int* length) {
std::vector parvec = vsargs_to_parvec(args, nargs);
- return cppstring_to_cstring(Cppyy::CallS(method, (void*)self, &parvec));
+ return Cppyy::CallS(method, (void*)self, &parvec, length);
}
cppyy_object_t cppyy_constructor(cppyy_method_t method, cppyy_type_t klass, int nargs, void* args) {
std::vector parvec = vsargs_to_parvec(args, nargs);
return cppyy_object_t(Cppyy::CallConstructor(method, klass, &parvec));
-// return cppyy_object_t(Cppyy::CallConstructor(method, klass, args));
}
cppyy_object_t cppyy_call_o(cppyy_method_t method, cppyy_object_t self, int nargs, void* args, cppyy_type_t result_type) {
From pypy.commits at gmail.com Tue Aug 16 03:34:39 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 16 Aug 2016 00:34:39 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: some changes to pypy abstract for
pyconza, added progress file for august (py3.5)
Message-ID: <57b2c20f.09afc20a.90904.6eb6@mx.google.com>
Author: Richard Plangger
Branch: extradoc
Changeset: r5668:3c35a2b9d370
Date: 2016-08-16 08:47 +0200
http://bitbucket.org/pypy/extradoc/changeset/3c35a2b9d370/
Log: some changes to pypy abstract for pyconza, added progress file for
august (py3.5)
diff --git a/planning/py3.5/2016-august-progress.rst b/planning/py3.5/2016-august-progress.rst
new file mode 100644
--- /dev/null
+++ b/planning/py3.5/2016-august-progress.rst
@@ -0,0 +1,10 @@
+August 2016
+===========
+
+Planned
+-------
+
+* Implement changes to memory view. e.g. hex(): https://bugs.python.org/issue9951 (plan_rich)
+
+Finished
+--------
diff --git a/talk/pyconza2016/pypy-abstract.txt b/talk/pyconza2016/pypy-abstract.txt
--- a/talk/pyconza2016/pypy-abstract.txt
+++ b/talk/pyconza2016/pypy-abstract.txt
@@ -3,36 +3,16 @@
In this talk I want to show how you can use PyPy for your benefit.
It will kick off with a short introduction covering PyPy and its just in time
-compiler. PyPy is the most advanced Python interpreter around (besides CPython)
-XXX
-XXX you seem to say "CPython is more advanced than PyPy" above,
-XXX which doesn't make sense in this sentence because you say
-XXX below that PyPy is much faster than CPython
-XXX
+compiler. PyPy is the most advanced Python interpreter around
and while it should generally just speed up your programs there is a wide range
of performance that you can get out of PyPy.
The first part, will cover considerations why one should write Python programs,
and only spend fractions of the development time to optimize your program.
-XXX
-XXX you mean, you want to explain that developers should write in Python
-XXX and spend only a small part of their time optimizing the program?
-XXX or something else? if I'm right then you should add below something
-XXX like "The second part of this session will be about this small part
-XXX of time: in cases where you need it, then I'll show tools that..."
-XXX But I'm not sure that's what you mean because CFFI is not really
-XXX about that: I'm trying to push it as a general solution also for
-XXX CPython, without focusing too much on performance. Maybe we should
-XXX have this talk be really about PyPy, and then for the other talk
-XXX I should have both CFFI and RevDB?
-XXX
-The second part of this session will show and give you the knowledge and
-tools to inspect and change your program to improve it. We will cover two tools in detail:
-CFFI & VMProf.
-
-Our advanced library CFFI (C Foreign Function Interface) can easily replace
-CPython extension code. VMProf is a platform to inspect you program while it is running,
-imposing very little overhead.
+The second part of this session will be about this small part
+of time: in cases where you need it, then I'll show tools that help you inspect
+and change your program to improve it. We will also dive into one tool more elaborately.
+VMProf, a platform to inspect your program while it is running, imposing very little overhead.
Throughout the talk real world examples will motivate why PyPy is a viable option
to optimize your Python programs and present the examples' value to their developers.
From pypy.commits at gmail.com Tue Aug 16 03:40:59 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 16 Aug 2016 00:40:59 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: Add the formal content of
milestone 1
Message-ID: <57b2c38b.88711c0a.56af.bed1@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r5669:bd8ee375d546
Date: 2016-08-16 09:40 +0200
http://bitbucket.org/pypy/extradoc/changeset/bd8ee375d546/
Log: Add the formal content of milestone 1
diff --git a/planning/py3.5/2016-august-progress.rst b/planning/py3.5/2016-august-progress.rst
--- a/planning/py3.5/2016-august-progress.rst
+++ b/planning/py3.5/2016-august-progress.rst
@@ -8,3 +8,41 @@
Finished
--------
+
+
+
+Milestone 1 (Aug-Sep-Oct 2016)
+------------------------------
+
+We have reached milestone 1 when we have done all the following points,
+possibly minus one of them if it is found during development that
+properly implementing it requires significantly more efforts than
+planned:
+
+* PEP 492, coroutines with async and await syntax. (The complete PEP
+ is included.)
+
+* PEP 465, a new matrix multiplication operator: a @ b.
+
+* PEP 448, additional unpacking generalizations.
+
+* bytes % args, bytearray % args: PEP 461
+
+* New bytes.hex(), bytearray.hex() and memoryview.hex() methods.
+
+* memoryview now supports tuple indexing
+
+* Generators have a new gi_yieldfrom attribute
+
+* A new RecursionError exception is now raised when maximum recursion
+ depth is reached.
+
+* The new os.scandir() function
+
+* Newly created file descriptors are non-inheritable (PEP 446)
+
+* The marshal format has been made more compact and efficient
+
+* enum: Support for enumeration types (PEP 435).
+
+* pathlib: Object-oriented filesystem paths (PEP 428).
From pypy.commits at gmail.com Tue Aug 16 04:03:15 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 16 Aug 2016 01:03:15 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Fix site.py
Message-ID: <57b2c8c3.2916c20a.26156.7fb2@mx.google.com>
Author: Armin Rigo
Branch: py3.5
Changeset: r86209:982050f81cb9
Date: 2016-08-16 09:11 +0100
http://bitbucket.org/pypy/pypy/changeset/982050f81cb9/
Log: Fix site.py
diff --git a/lib-python/3/site.py b/lib-python/3/site.py
--- a/lib-python/3/site.py
+++ b/lib-python/3/site.py
@@ -378,8 +378,8 @@
license = "See https://www.python.org/psf/license/"
licenseargs = (license, files, dirs)
- builtins.credits = _Printer("credits", credits)
- builtins.license = _Printer("license", *licenseargs)
+ builtins.credits = _sitebuiltins._Printer("credits", credits)
+ builtins.license = _sitebuiltins._Printer("license", *licenseargs)
def sethelper():
builtins.help = _sitebuiltins._Helper()
From pypy.commits at gmail.com Tue Aug 16 04:18:39 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 16 Aug 2016 01:18:39 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: fixes
Message-ID: <57b2cc5f.c2a5c20a.de387.8190@mx.google.com>
Author: Armin Rigo
Branch: py3.5
Changeset: r86210:f4068c63d5e3
Date: 2016-08-16 09:26 +0100
http://bitbucket.org/pypy/pypy/changeset/f4068c63d5e3/
Log: fixes
diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py
--- a/lib-python/3/distutils/command/build_ext.py
+++ b/lib-python/3/distutils/command/build_ext.py
@@ -11,7 +11,6 @@
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
-from distutils.sysconfig import get_config_h_filename
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
@@ -30,6 +29,7 @@
show_compilers()
def _get_c_extension_suffix():
+ import importlib
suffixes = importlib.machinery.EXTENSION_SUFFIXES
return suffixes[0] if suffixes else None
@@ -204,6 +204,7 @@
# this allows distutils on windows to work in the source tree
if 0:
# pypy has no config_h_filename directory
+ from distutils.sysconfig import get_config_h_filename
self.include_dirs.append(os.path.dirname(get_config_h_filename()))
_sys_home = getattr(sys, '_home', None)
if _sys_home:
From pypy.commits at gmail.com Tue Aug 16 04:25:58 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 16 Aug 2016 01:25:58 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5: Revert all changes to this from Python
3.5.1. This mainly reverts
Message-ID: <57b2ce16.a427c20a.44913.8825@mx.google.com>
Author: Armin Rigo
Branch: py3.5
Changeset: r86211:4c4ec4987626
Date: 2016-08-16 10:25 +0200
http://bitbucket.org/pypy/pypy/changeset/4c4ec4987626/
Log: Revert all changes to this from Python 3.5.1. This mainly reverts
1341a432e134, which I think is not needed any more in 3.5
diff --git a/lib-python/3/code.py b/lib-python/3/code.py
--- a/lib-python/3/code.py
+++ b/lib-python/3/code.py
@@ -140,32 +140,15 @@
sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
sys.last_traceback = last_tb
try:
- lines = []
- for value, tb in traceback._iter_chain(*ei[1:]):
- if isinstance(value, str):
- lines.append(value)
- lines.append('\n')
- continue
- if tb:
- tblist = traceback.extract_tb(tb)
- if tb is last_tb:
- # The last traceback includes the frame we
- # exec'd in
- del tblist[:1]
- tblines = traceback.format_list(tblist)
- if tblines:
- lines.append("Traceback (most recent call last):\n")
- lines.extend(tblines)
- lines.extend(traceback.format_exception_only(type(value),
- value))
+ lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
+ if sys.excepthook is sys.__excepthook__:
+ self.write(''.join(lines))
+ else:
+ # If someone has set sys.excepthook, we let that take precedence
+ # over self.write
+ sys.excepthook(ei[0], ei[1], last_tb)
finally:
- tblist = last_tb = ei = None
- if sys.excepthook is sys.__excepthook__:
- self.write(''.join(lines))
- else:
- # If someone has set sys.excepthook, we let that take precedence
- # over self.write
- sys.excepthook(sys.last_type, sys.last_value, last_tb)
+ last_tb = ei = None
def write(self, data):
"""Write a string.
From pypy.commits at gmail.com Tue Aug 16 04:30:23 2016
From: pypy.commits at gmail.com (arigo)
Date: Tue, 16 Aug 2016 01:30:23 -0700 (PDT)
Subject: [pypy-commit] extradoc extradoc: Update
Message-ID: <57b2cf1f.54bc1c0a.2469c.c880@mx.google.com>
Author: Armin Rigo
Branch: extradoc
Changeset: r5670:1958a42c9de1
Date: 2016-08-16 10:30 +0200
http://bitbucket.org/pypy/extradoc/changeset/1958a42c9de1/
Log: Update
diff --git a/planning/py3.5/2016-august-progress.rst b/planning/py3.5/2016-august-progress.rst
--- a/planning/py3.5/2016-august-progress.rst
+++ b/planning/py3.5/2016-august-progress.rst
@@ -6,6 +6,10 @@
* Implement changes to memory view. e.g. hex(): https://bugs.python.org/issue9951 (plan_rich)
+* Make a translated py3.5 actually work a bit (currently we get
+ systematic failures), up to the point where we can meaningfully
+ run the lib-python tests (arigo)
+
Finished
--------
From pypy.commits at gmail.com Tue Aug 16 05:14:12 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 16 Aug 2016 02:14:12 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-memoryview: hex method for memoryview +
tests
Message-ID: <57b2d964.45d11c0a.31711.d5e6@mx.google.com>
Author: Richard Plangger
Branch: py3.5-memoryview
Changeset: r86212:8e69fe818876
Date: 2016-08-16 11:13 +0200
http://bitbucket.org/pypy/pypy/changeset/8e69fe818876/
Log: hex method for memoryview + tests
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -491,6 +491,24 @@
i += 2
return data
+HEXDIGITS = "0123456789abcdef"
+PY_SIZE_T_MAX = 2**(rffi.sizeof(rffi.SIZE_T)*8)-1
+
+def _array_to_hexstring(space, buf):
+ length = buf.getlength()
+ hexstring = StringBuilder(length*2)
+
+ if length > PY_SIZE_T_MAX/2:
+ raise OperationError(space.w_MemoryError)
+
+ for i in range(length):
+ byte = ord(buf.getitem(i))
+ c = (byte >> 4 & 0xf)
+ hexstring.append(HEXDIGITS[c])
+ c = (byte & 0xf)
+ hexstring.append(HEXDIGITS[c])
+
+ return space.wrap(hexstring.build())
class BytearrayDocstrings:
"""bytearray(iterable_of_ints) -> bytearray
diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
--- a/pypy/objspace/std/memoryobject.py
+++ b/pypy/objspace/std/memoryobject.py
@@ -232,6 +232,11 @@
newitemsize = self.get_native_fmtchar(fmt)
return W_MemoryView(self.buf, fmt, newitemsize)
+ def descr_hex(self, space):
+ from pypy.objspace.std.bytearrayobject import _array_to_hexstring
+ self._check_released(space)
+ return _array_to_hexstring(space, self.buf)
+
W_MemoryView.typedef = TypeDef(
"memoryview",
@@ -250,6 +255,7 @@
__exit__ = interp2app(W_MemoryView.descr_exit),
__weakref__ = make_weakref_descr(W_MemoryView),
cast = interp2app(W_MemoryView.descr_cast),
+ hex = interp2app(W_MemoryView.descr_hex),
tobytes = interp2app(W_MemoryView.descr_tobytes),
tolist = interp2app(W_MemoryView.descr_tolist),
release = interp2app(W_MemoryView.descr_release),
diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py
--- a/pypy/objspace/std/test/test_memoryobject.py
+++ b/pypy/objspace/std/test/test_memoryobject.py
@@ -18,6 +18,7 @@
assert len(w) == 2
exc = raises(NotImplementedError, "v[0:2:2]")
assert str(exc.value) == ""
+ exc = raises(TypeError, "memoryview('foobar')")
def test_rw(self):
data = bytearray(b'abcefg')
@@ -161,3 +162,6 @@
raises(ValueError, memoryview(b"foobar")._pypy_raw_address)
a = memoryview(bytearray(b"foobar"))._pypy_raw_address()
assert a != 0
+
+ def test_hex(self):
+ assert memoryview(b"abc").hex() == u'616263'
From pypy.commits at gmail.com Tue Aug 16 05:15:52 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Tue, 16 Aug 2016 02:15:52 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Merge with py3.5
Message-ID: <57b2d9c8.45c8c20a.dc13a.9a76@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86213:a955efeebd40
Date: 2016-08-16 11:14 +0200
http://bitbucket.org/pypy/pypy/changeset/a955efeebd40/
Log: Merge with py3.5
diff too long, truncating to 2000 out of 4303 lines
diff --git a/include/PyPy.h b/include/PyPy.h
--- a/include/PyPy.h
+++ b/include/PyPy.h
@@ -2,7 +2,11 @@
#define _PYPY_H_
/* This header is meant to be included in programs that use PyPy as an
- embedded library. */
+ embedded library.
+
+ NOTE: this is deprecated. Instead, use cffi's embedding support:
+ http://cffi.readthedocs.org/en/latest/embedding.html
+*/
#ifdef __cplusplus
extern "C" {
diff --git a/lib-python/3/code.py b/lib-python/3/code.py
--- a/lib-python/3/code.py
+++ b/lib-python/3/code.py
@@ -140,32 +140,15 @@
sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
sys.last_traceback = last_tb
try:
- lines = []
- for value, tb in traceback._iter_chain(*ei[1:]):
- if isinstance(value, str):
- lines.append(value)
- lines.append('\n')
- continue
- if tb:
- tblist = traceback.extract_tb(tb)
- if tb is last_tb:
- # The last traceback includes the frame we
- # exec'd in
- del tblist[:1]
- tblines = traceback.format_list(tblist)
- if tblines:
- lines.append("Traceback (most recent call last):\n")
- lines.extend(tblines)
- lines.extend(traceback.format_exception_only(type(value),
- value))
+ lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
+ if sys.excepthook is sys.__excepthook__:
+ self.write(''.join(lines))
+ else:
+ # If someone has set sys.excepthook, we let that take precedence
+ # over self.write
+ sys.excepthook(ei[0], ei[1], last_tb)
finally:
- tblist = last_tb = ei = None
- if sys.excepthook is sys.__excepthook__:
- self.write(''.join(lines))
- else:
- # If someone has set sys.excepthook, we let that take precedence
- # over self.write
- sys.excepthook(sys.last_type, sys.last_value, last_tb)
+ last_tb = ei = None
def write(self, data):
"""Write a string.
diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py
--- a/lib-python/3/distutils/command/build_ext.py
+++ b/lib-python/3/distutils/command/build_ext.py
@@ -11,7 +11,6 @@
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
-from distutils.sysconfig import get_config_h_filename
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
@@ -30,6 +29,7 @@
show_compilers()
def _get_c_extension_suffix():
+ import importlib
suffixes = importlib.machinery.EXTENSION_SUFFIXES
return suffixes[0] if suffixes else None
@@ -204,6 +204,7 @@
# this allows distutils on windows to work in the source tree
if 0:
# pypy has no config_h_filename directory
+ from distutils.sysconfig import get_config_h_filename
self.include_dirs.append(os.path.dirname(get_config_h_filename()))
_sys_home = getattr(sys, '_home', None)
if _sys_home:
diff --git a/lib-python/3/site.py b/lib-python/3/site.py
--- a/lib-python/3/site.py
+++ b/lib-python/3/site.py
@@ -378,8 +378,8 @@
license = "See https://www.python.org/psf/license/"
licenseargs = (license, files, dirs)
- builtins.credits = _Printer("credits", credits)
- builtins.license = _Printer("license", *licenseargs)
+ builtins.credits = _sitebuiltins._Printer("credits", credits)
+ builtins.license = _sitebuiltins._Printer("license", *licenseargs)
def sethelper():
builtins.help = _sitebuiltins._Helper()
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.7.0
+Version: 1.8.0
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.7.0"
-__version_info__ = (1, 7, 0)
+__version__ = "1.8.0"
+__version_info__ = (1, 8, 0)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -42,7 +42,9 @@
# include
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
- typedef unsigned char _Bool;
+# ifndef __cplusplus
+ typedef unsigned char _Bool;
+# endif
# endif
#else
# include
@@ -59,7 +61,7 @@
#ifdef __cplusplus
# ifndef _Bool
-# define _Bool bool /* semi-hackish: C++ has no _Bool; bool is builtin */
+ typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
@@ -196,20 +198,6 @@
return NULL;
}
-_CFFI_UNUSED_FN
-static PyObject **_cffi_unpack_args(PyObject *args_tuple, Py_ssize_t expected,
- const char *fnname)
-{
- if (PyTuple_GET_SIZE(args_tuple) != expected) {
- PyErr_Format(PyExc_TypeError,
- "%.150s() takes exactly %zd arguments (%zd given)",
- fnname, expected, PyTuple_GET_SIZE(args_tuple));
- return NULL;
- }
- return &PyTuple_GET_ITEM(args_tuple, 0); /* pointer to the first item,
- the others follow */
-}
-
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.7.0"
+ "\ncompiled with cffi version: 1.8.0"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -519,12 +519,10 @@
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
- import warnings
- warnings.warn("%r has no values explicitly defined; next version "
- "will refuse to guess which integer type it is "
- "meant to be (unsigned/signed, int/long)"
- % self._get_c_name())
- smallest_value = largest_value = 0
+ raise api.CDefError("%r has no values explicitly defined: "
+ "refusing to guess which integer type it is "
+ "meant to be (unsigned/signed, int/long)"
+ % self._get_c_name())
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -275,6 +275,8 @@
def write_c_source_to_f(self, f, preamble):
self._f = f
prnt = self._prnt
+ if self.ffi._embedding is None:
+ prnt('#define Py_LIMITED_API')
#
# first the '#include' (actually done by inlining the file's content)
lines = self._rel_readlines('_cffi_include.h')
@@ -683,13 +685,11 @@
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
- prnt(' PyObject **aa;')
prnt()
- prnt(' aa = _cffi_unpack_args(args, %d, "%s");' % (len(rng), name))
- prnt(' if (aa == NULL)')
+ prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
+ name, len(rng), len(rng),
+ ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
- for i in rng:
- prnt(' arg%d = aa[%d];' % (i, i))
prnt()
#
for i, type in enumerate(tp.args):
@@ -862,6 +862,8 @@
enumfields = list(tp.enumfields())
for fldname, fldtype, fbitsize, fqual in enumfields:
fldtype = self._field_type(tp, fldname, fldtype)
+ self._check_not_opaque(fldtype,
+ "field '%s.%s'" % (tp.name, fldname))
# cname is None for _add_missing_struct_unions() only
op = OP_NOOP
if fbitsize >= 0:
@@ -911,6 +913,13 @@
first_field_index, c_fields))
self._seen_struct_unions.add(tp)
+ def _check_not_opaque(self, tp, location):
+ while isinstance(tp, model.ArrayType):
+ tp = tp.item
+ if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
+ raise TypeError(
+ "%s is of an opaque type (not declared in cdef())" % location)
+
def _add_missing_struct_unions(self):
# not very nice, but some struct declarations might be missing
# because they don't have any known C name. Check that they are
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -99,17 +99,24 @@
The garbage collectors used or implemented by PyPy are not based on
reference counting, so the objects are not freed instantly when they are no
-longer reachable. The most obvious effect of this is that files are not
+longer reachable. The most obvious effect of this is that files (and sockets, etc) are not
promptly closed when they go out of scope. For files that are opened for
writing, data can be left sitting in their output buffers for a while, making
the on-disk file appear empty or truncated. Moreover, you might reach your
OS's limit on the number of concurrently opened files.
-Fixing this is essentially impossible without forcing a
+If you are debugging a case where a file in your program is not closed
+properly, you can use the ``-X track-resources`` command line option. If it is
+given, a ``ResourceWarning`` is produced for every file and socket that the
+garbage collector closes. The warning will contain the stack trace of the
+position where the file or socket was created, to make it easier to see which
+parts of the program don't close files explicitly.
+
+Fixing this difference to CPython is essentially impossible without forcing a
reference-counting approach to garbage collection. The effect that you
get in CPython has clearly been described as a side-effect of the
implementation and not a language design decision: programs relying on
-this are basically bogus. It would anyway be insane to try to enforce
+this are basically bogus. It would be a too strong restriction to try to enforce
CPython's behavior in a language spec, given that it has no chance to be
adopted by Jython or IronPython (or any other port of Python to Java or
.NET).
@@ -134,7 +141,7 @@
Here are some more technical details. This issue affects the precise
time at which ``__del__`` methods are called, which
-is not reliable in PyPy (nor Jython nor IronPython). It also means that
+is not reliable or timely in PyPy (nor Jython nor IronPython). It also means that
**weak references** may stay alive for a bit longer than expected. This
makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less
useful: they will appear to stay alive for a bit longer in PyPy, and
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -14,10 +14,9 @@
Defaults to 1/2 of your cache or ``4M``.
Small values (like 1 or 1KB) are useful for debugging.
-``PYPY_GC_NURSERY_CLEANUP``
- The interval at which nursery is cleaned up. Must
- be smaller than the nursery size and bigger than the
- biggest object we can allotate in the nursery.
+``PYPY_GC_NURSERY_DEBUG``
+ If set to non-zero, will fill nursery with garbage, to help
+ debugging.
``PYPY_GC_INCREMENT_STEP``
The size of memory marked during the marking step. Default is size of
@@ -62,3 +61,8 @@
use.
Values are ``0`` (off), ``1`` (on major collections) or ``2`` (also
on minor collections).
+
+``PYPY_GC_MAX_PINNED``
+ The maximal number of pinned objects at any point in time. Defaults
+ to a conservative value depending on nursery size and maximum object
+ size inside the nursery. Useful for debugging by setting it to 0.
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -2,6 +2,9 @@
pypy
======
+.. note: this is turned into a regular man page "pypy.1" by
+ doing "make man" in pypy/doc/
+
SYNOPSIS
========
@@ -48,6 +51,10 @@
-B
Disable writing bytecode (``.pyc``) files.
+-X track-resources
+ Produce a ``ResourceWarning`` whenever a file or socket is closed by the
+ garbage collector.
+
--version
Print the PyPy version.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -105,3 +105,26 @@
.. branch: ep2016sprint
Trying harder to make hash(-1) return -2, like it does on CPython
+
+.. branch: jitlog-exact-source-lines
+
+Log exact line positions in debug merge points.
+
+.. branch: null_byte_after_str
+
+Allocate all RPython strings with one extra byte, normally unused.
+It is used to hold a final zero in case we need some ``char *``
+representation of the string, together with checks like ``not
+can_move()`` or object pinning. Main new thing that this allows:
+``ffi.from_buffer(string)`` in CFFI. Additionally, and most
+importantly, CFFI calls that take directly a string as argument don't
+copy the string any more---this is like CFFI on CPython.
+
+.. branch: resource_warning
+
+Add a new command line option -X track-resources which will produce
+ResourceWarnings when the GC closes unclosed files and sockets.
+
+.. branch: cpyext-realloc
+
+Implement PyObject_Realloc
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -114,8 +114,15 @@
return getattr(space, name)(operand)
return do_fold
-def _fold_pow(space, left, right):
- return space.pow(left, right, space.w_None)
+def _fold_pow(space, w_left, w_right):
+ # don't constant-fold if "w_left" and "w_right" are integers and
+ # the estimated bit length of the power is unreasonably large
+ space.appexec([w_left, w_right], """(left, right):
+ if isinstance(left, (int, long)) and isinstance(right, (int, long)):
+ if left.bit_length() * right > 5000:
+ raise OverflowError
+ """)
+ return space.pow(w_left, w_right, space.w_None)
def _fold_not(space, operand):
return space.wrap(not space.is_true(operand))
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1322,6 +1322,25 @@
assert ops.BUILD_SET not in counts
assert ops.LOAD_CONST in counts
+ def test_dont_fold_huge_powers(self):
+ for source in (
+ "2 ** 3000", # not constant-folded: too big
+ "(-2) ** 3000",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER in counts
+
+ for source in (
+ "2 ** 2000", # constant-folded
+ "2 ** -3000",
+ "1.001 ** 3000",
+ "1 ** 3000.0",
+ ):
+ source = 'def f(): %s' % source
+ counts = self.count_instructions(source)
+ assert ops.BINARY_POWER not in counts
+
def test_call_function_var(self):
source = """call(*me)"""
code, blocks = generate_function_code(source, self.space)
diff --git a/pypy/interpreter/astcompiler/test/test_validate.py b/pypy/interpreter/astcompiler/test/test_validate.py
--- a/pypy/interpreter/astcompiler/test/test_validate.py
+++ b/pypy/interpreter/astcompiler/test/test_validate.py
@@ -38,9 +38,8 @@
self.mod(m, "must have Load context", "eval")
def _check_arguments(self, fac, check):
- def arguments(args=None, vararg=None, varargannotation=None,
- kwonlyargs=None, kwarg=None, kwargannotation=None,
- defaults=None, kw_defaults=None):
+ def arguments(args=None, vararg=None, kwonlyargs=None,
+ kw_defaults=None, kwarg=None, defaults=None):
if args is None:
args = []
if kwonlyargs is None:
@@ -49,20 +48,12 @@
defaults = []
if kw_defaults is None:
kw_defaults = []
- args = ast.arguments(args, vararg, varargannotation, kwonlyargs,
- kwarg, kwargannotation, defaults, kw_defaults)
+ args = ast.arguments(args, vararg, kwonlyargs,
+ kw_defaults, kwarg, defaults)
return fac(args)
args = [ast.arg("x", ast.Name("x", ast.Store, 0, 0))]
check(arguments(args=args), "must have Load context")
- check(arguments(varargannotation=ast.Num(self.space.wrap(3), 0, 0)),
- "varargannotation but no vararg")
- check(arguments(varargannotation=ast.Name("x", ast.Store, 0, 0), vararg="x"),
- "must have Load context")
check(arguments(kwonlyargs=args), "must have Load context")
- check(arguments(kwargannotation=ast.Num(self.space.wrap(42), 0, 0)),
- "kwargannotation but no kwarg")
- check(arguments(kwargannotation=ast.Name("x", ast.Store, 0, 0),
- kwarg="x"), "must have Load context")
check(arguments(defaults=[ast.Num(self.space.wrap(3), 0, 0)]),
"more positional defaults than args")
check(arguments(kw_defaults=[ast.Num(self.space.wrap(4), 0, 0)]),
@@ -77,7 +68,7 @@
"must have Load context")
def test_funcdef(self):
- a = ast.arguments([], None, None, [], None, None, [], [])
+ a = ast.arguments([], None, [], [], None, [])
f = ast.FunctionDef("x", a, [], [], None, 0, 0)
self.stmt(f, "empty body on FunctionDef")
f = ast.FunctionDef("x", a, [ast.Pass(0, 0)], [ast.Name("x", ast.Store, 0, 0)],
@@ -91,8 +82,7 @@
self._check_arguments(fac, self.stmt)
def test_classdef(self):
- def cls(bases=None, keywords=None, starargs=None, kwargs=None,
- body=None, decorator_list=None):
+ def cls(bases=None, keywords=None, body=None, decorator_list=None):
if bases is None:
bases = []
if keywords is None:
@@ -101,16 +91,12 @@
body = [ast.Pass(0, 0)]
if decorator_list is None:
decorator_list = []
- return ast.ClassDef("myclass", bases, keywords, starargs,
- kwargs, body, decorator_list, 0, 0)
+ return ast.ClassDef("myclass", bases, keywords,
+ body, decorator_list, 0, 0)
self.stmt(cls(bases=[ast.Name("x", ast.Store, 0, 0)]),
"must have Load context")
self.stmt(cls(keywords=[ast.keyword("x", ast.Name("x", ast.Store, 0, 0))]),
"must have Load context")
- self.stmt(cls(starargs=ast.Name("x", ast.Store, 0, 0)),
- "must have Load context")
- self.stmt(cls(kwargs=ast.Name("x", ast.Store, 0, 0)),
- "must have Load context")
self.stmt(cls(body=[]), "empty body on ClassDef")
self.stmt(cls(body=[None]), "None disallowed")
self.stmt(cls(decorator_list=[ast.Name("x", ast.Store, 0, 0)]),
@@ -250,7 +236,7 @@
self.expr(u, "must have Load context")
def test_lambda(self):
- a = ast.arguments([], None, None, [], None, None, [], [])
+ a = ast.arguments([], None, [], [], None, [])
self.expr(ast.Lambda(a, ast.Name("x", ast.Store, 0, 0), 0, 0),
"must have Load context")
def fac(args):
@@ -343,20 +329,12 @@
func = ast.Name("x", ast.Load, 0, 0)
args = [ast.Name("y", ast.Load, 0, 0)]
keywords = [ast.keyword("w", ast.Name("z", ast.Load, 0, 0))]
- stararg = ast.Name("p", ast.Load, 0, 0)
- kwarg = ast.Name("q", ast.Load, 0, 0)
- call = ast.Call(ast.Name("x", ast.Store, 0, 0), args, keywords, stararg,
- kwarg, 0, 0)
+ call = ast.Call(ast.Name("x", ast.Store, 0, 0), args, keywords, 0, 0)
self.expr(call, "must have Load context")
- call = ast.Call(func, [None], keywords, stararg, kwarg, 0, 0)
+ call = ast.Call(func, [None], keywords, 0, 0)
self.expr(call, "None disallowed")
bad_keywords = [ast.keyword("w", ast.Name("z", ast.Store, 0, 0))]
- call = ast.Call(func, args, bad_keywords, stararg, kwarg, 0, 0)
- self.expr(call, "must have Load context")
- call = ast.Call(func, args, keywords, ast.Name("z", ast.Store, 0, 0), kwarg, 0, 0)
- self.expr(call, "must have Load context")
- call = ast.Call(func, args, keywords, stararg,
- ast.Name("w", ast.Store, 0, 0), 0, 0)
+ call = ast.Call(func, args, bad_keywords, 0, 0)
self.expr(call, "must have Load context")
def test_num(self):
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1375,10 +1375,11 @@
for i in range(itemcount, 0, -1):
w_item = self.peekvalue(i-1)
# cannot use w_sum.update, w_item might not be a set
- iterator = w_item.itervalues()
+ iterator = space.iter(w_item)
while True:
- w_value = iterator.next_value()
- if w_value is None:
+ try:
+ w_value = space.next(iterator)
+ except OperationError:
break
w_sum.add(w_value)
while itemcount != 0:
diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py
--- a/pypy/interpreter/pyparser/test/test_pyparse.py
+++ b/pypy/interpreter/pyparser/test/test_pyparse.py
@@ -167,6 +167,12 @@
py.test.raises(SyntaxError, self.parse, 'f()\n# blah\nblah()', "single")
py.test.raises(SyntaxError, self.parse, 'f()\nxy # blah\nblah()', "single")
py.test.raises(SyntaxError, self.parse, 'x = 5 # comment\nx = 6\n', "single")
+
+ def test_unpack(self):
+ self.parse('[*{2}, 3, *[4]]')
+ self.parse('{*{2}, 3, *[4]}')
+ self.parse('{**{}, 3:4, **{5:6, 7:8}}')
+ self.parse('f(2, *a, *b, **b, **c, **d)')
def test_async_await(self):
self.parse("async def coro(): await func")
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -209,7 +209,6 @@
self.check(['-c', 'pass'], {'PYTHONNOUSERSITE': '1'}, sys_argv=['-c'],
run_command='pass', **expected)
-
class TestInteraction:
"""
These tests require pexpect (UNIX-only).
@@ -1152,4 +1151,3 @@
# assert it did not crash
finally:
sys.path[:] = old_sys_path
-
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -134,7 +134,7 @@
assert self.space.lookup(w_instance, "gobbledygook") is None
w_instance = self.space.appexec([], """():
class Lookup(object):
- "bla"
+ "bla"
return Lookup()""")
assert self.space.str_w(self.space.lookup(w_instance, "__doc__")) == "bla"
@@ -148,7 +148,7 @@
assert is_callable(w_func)
w_lambda_func = self.space.appexec([], "(): return lambda: True")
assert is_callable(w_lambda_func)
-
+
w_instance = self.space.appexec([], """():
class Call(object):
def __call__(self): pass
@@ -308,7 +308,7 @@
def test_call_obj_args(self):
from pypy.interpreter.argument import Arguments
-
+
space = self.space
w_f = space.appexec([], """():
@@ -333,7 +333,7 @@
assert w_x is w_9
assert w_y is w_1
- w_res = space.call_obj_args(w_a, w_9, Arguments(space, []))
+ w_res = space.call_obj_args(w_a, w_9, Arguments(space, []))
assert w_res is w_9
def test_compare_by_iteration(self):
@@ -383,7 +383,7 @@
assert not space.isabstractmethod_w(space.getattr(w_B, space.wrap('g')))
assert not space.isabstractmethod_w(space.getattr(w_B, space.wrap('h')))
-class TestModuleMinimal:
+class TestModuleMinimal:
def test_sys_exists(self):
assert self.space.sys
diff --git a/pypy/module/_asyncio/test/test_asyncio.py b/pypy/module/_asyncio/test/test_asyncio.py
--- a/pypy/module/_asyncio/test/test_asyncio.py
+++ b/pypy/module/_asyncio/test/test_asyncio.py
@@ -18,3 +18,31 @@
loop.run_until_complete(f())
print("done with async loop")
"""
+
+ def test_asynchronous_context_managers(self):
+ """
+import encodings.idna
+import asyncio
+
+class Corotest(object):
+ def __init__(self):
+ self.res = "-"
+
+ async def coro(self, name, lock):
+ self.res += ' coro {}: waiting for lock -'.format(name)
+ async with lock:
+ self.res += ' coro {}: holding the lock -'.format(name)
+ await asyncio.sleep(1)
+ self.res += ' coro {}: releasing the lock -'.format(name)
+
+cor = Corotest()
+loop = asyncio.get_event_loop()
+lock = asyncio.Lock()
+coros = asyncio.gather(cor.coro(1, lock), cor.coro(2, lock))
+try:
+ loop.run_until_complete(coros)
+finally:
+ loop.close()
+
+assert cor.res == "- coro 1: waiting for lock - coro 1: holding the lock - coro 2: waiting for lock - coro 1: releasing the lock - coro 2: holding the lock - coro 2: releasing the lock -"
+ """
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi, entrypoint
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.7.0"
+VERSION = "1.8.0"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -157,11 +157,13 @@
mustfree_max_plus_1 = 0
buffer = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
try:
+ keepalives = [None] * len(args_w) # None or strings
for i in range(len(args_w)):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
w_obj = args_w[i]
argtype = self.fargs[i]
- if argtype.convert_argument_from_object(data, w_obj):
+ if argtype.convert_argument_from_object(data, w_obj,
+ keepalives, i):
# argtype is a pointer type, and w_obj a list/tuple/str
mustfree_max_plus_1 = i + 1
@@ -177,9 +179,13 @@
if isinstance(argtype, W_CTypePointer):
data = rffi.ptradd(buffer, cif_descr.exchange_args[i])
flag = get_mustfree_flag(data)
+ raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
if flag == 1:
- raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
lltype.free(raw_cdata, flavor='raw')
+ elif flag >= 4:
+ value = keepalives[i]
+ assert value is not None
+ rffi.free_nonmovingbuffer(value, raw_cdata, chr(flag))
lltype.free(buffer, flavor='raw')
keepalive_until_here(args_w)
return w_res
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py
--- a/pypy/module/_cffi_backend/ctypeobj.py
+++ b/pypy/module/_cffi_backend/ctypeobj.py
@@ -83,7 +83,7 @@
raise oefmt(space.w_TypeError, "cannot initialize cdata '%s'",
self.name)
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
self.convert_from_object(cdata, w_ob)
return False
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -16,8 +16,8 @@
class W_CTypePtrOrArray(W_CType):
- _attrs_ = ['ctitem', 'can_cast_anything', 'length']
- _immutable_fields_ = ['ctitem', 'can_cast_anything', 'length']
+ _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
+ _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length']
length = -1
def __init__(self, space, size, extra, extra_position, ctitem,
@@ -30,6 +30,9 @@
# - for functions, it is the return type
self.ctitem = ctitem
self.can_cast_anything = could_cast_anything and ctitem.cast_anything
+ self.accept_str = (self.can_cast_anything or
+ (ctitem.is_primitive_integer and
+ ctitem.size == rffi.sizeof(lltype.Char)))
def is_unichar_ptr_or_array(self):
return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar)
@@ -72,9 +75,7 @@
pass
else:
self._convert_array_from_listview(cdata, space.listview(w_ob))
- elif (self.can_cast_anything or
- (self.ctitem.is_primitive_integer and
- self.ctitem.size == rffi.sizeof(lltype.Char))):
+ elif self.accept_str:
if not space.isinstance_w(w_ob, space.w_str):
raise self._convert_error("bytes or list or tuple", w_ob)
s = space.str_w(w_ob)
@@ -262,8 +263,16 @@
else:
return lltype.nullptr(rffi.CCHARP.TO)
- def _prepare_pointer_call_argument(self, w_init, cdata):
+ def _prepare_pointer_call_argument(self, w_init, cdata, keepalives, i):
space = self.space
+ if self.accept_str and space.isinstance_w(w_init, space.w_str):
+ # special case to optimize strings passed to a "char *" argument
+ value = space.bytes_w(w_init)
+ keepalives[i] = value
+ buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value)
+ rffi.cast(rffi.CCHARPP, cdata)[0] = buf
+ return ord(buf_flag) # 4, 5 or 6
+ #
if (space.isinstance_w(w_init, space.w_list) or
space.isinstance_w(w_init, space.w_tuple)):
length = space.int_w(space.len(w_init))
@@ -300,10 +309,11 @@
rffi.cast(rffi.CCHARPP, cdata)[0] = result
return 1
- def convert_argument_from_object(self, cdata, w_ob):
+ def convert_argument_from_object(self, cdata, w_ob, keepalives, i):
from pypy.module._cffi_backend.ctypefunc import set_mustfree_flag
result = (not isinstance(w_ob, cdataobj.W_CData) and
- self._prepare_pointer_call_argument(w_ob, cdata))
+ self._prepare_pointer_call_argument(w_ob, cdata,
+ keepalives, i))
if result == 0:
self.convert_from_object(cdata, w_ob)
set_mustfree_flag(cdata, result)
diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py
--- a/pypy/module/_cffi_backend/ffi_obj.py
+++ b/pypy/module/_cffi_backend/ffi_obj.py
@@ -353,7 +353,7 @@
'array.array' or numpy arrays."""
#
w_ctchara = newtype._new_chara_type(self.space)
- return func.from_buffer(self.space, w_ctchara, w_python_buffer)
+ return func._from_buffer(self.space, w_ctchara, w_python_buffer)
@unwrap_spec(w_arg=W_CData)
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -1,7 +1,8 @@
from rpython.rtyper.annlowlevel import llstr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw
-from rpython.rlib.objectmodel import keepalive_until_here
+from rpython.rlib.objectmodel import keepalive_until_here, we_are_translated
+from rpython.rlib import jit
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
@@ -132,17 +133,66 @@
raise oefmt(space.w_TypeError,
"needs 'char[]', got '%s'", w_ctype.name)
#
+ return _from_buffer(space, w_ctype, w_x)
+
+def _from_buffer(space, w_ctype, w_x):
buf = _fetch_as_read_buffer(space, w_x)
- try:
- _cdata = buf.get_raw_address()
- except ValueError:
- raise oefmt(space.w_TypeError,
- "from_buffer() got a '%T' object, which supports the "
- "buffer interface but cannot be rendered as a plain "
- "raw address on PyPy", w_x)
+ if space.isinstance_w(w_x, space.w_str):
+ _cdata = get_raw_address_of_string(space, w_x)
+ else:
+ try:
+ _cdata = buf.get_raw_address()
+ except ValueError:
+ raise oefmt(space.w_TypeError,
+ "from_buffer() got a '%T' object, which supports the "
+ "buffer interface but cannot be rendered as a plain "
+ "raw address on PyPy", w_x)
#
return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x)
+# ____________________________________________________________
+
+class RawBytes(object):
+ def __init__(self, string):
+ self.ptr = rffi.str2charp(string, track_allocation=False)
+ def __del__(self):
+ rffi.free_charp(self.ptr, track_allocation=False)
+
+class RawBytesCache(object):
+ def __init__(self, space):
+ from pypy.interpreter.baseobjspace import W_Root
+ from rpython.rlib import rweakref
+ self.wdict = rweakref.RWeakKeyDictionary(W_Root, RawBytes)
+
+ at jit.dont_look_inside
+def get_raw_address_of_string(space, w_x):
+ """Special case for ffi.from_buffer(string). Returns a 'char *' that
+ is valid as long as the string object is alive. Two calls to
+ ffi.from_buffer(same_string) are guaranteed to return the same pointer.
+ """
+ from rpython.rtyper.annlowlevel import llstr
+ from rpython.rtyper.lltypesystem.rstr import STR
+ from rpython.rtyper.lltypesystem import llmemory
+ from rpython.rlib import rgc
+
+ cache = space.fromcache(RawBytesCache)
+ rawbytes = cache.wdict.get(w_x)
+ if rawbytes is None:
+ data = space.str_w(w_x)
+ if we_are_translated() and not rgc.can_move(data):
+ lldata = llstr(data)
+ data_start = (llmemory.cast_ptr_to_adr(lldata) +
+ rffi.offsetof(STR, 'chars') +
+ llmemory.itemoffsetof(STR.chars, 0))
+ data_start = rffi.cast(rffi.CCHARP, data_start)
+ data_start[len(data)] = '\x00' # write the final extra null
+ return data_start
+ rawbytes = RawBytes(data)
+ cache.wdict.set(w_x, rawbytes)
+ return rawbytes.ptr
+
+# ____________________________________________________________
+
def unsafe_escaping_ptr_for_ptr_or_array(w_cdata):
if not w_cdata.ctype.is_nonfunc_pointer_or_array:
diff --git a/pypy/module/_cffi_backend/parse_c_type.py b/pypy/module/_cffi_backend/parse_c_type.py
--- a/pypy/module/_cffi_backend/parse_c_type.py
+++ b/pypy/module/_cffi_backend/parse_c_type.py
@@ -97,11 +97,8 @@
[rffi.INT], rffi.CCHARP)
def parse_c_type(info, input):
- p_input = rffi.str2charp(input)
- try:
+ with rffi.scoped_view_charp(input) as p_input:
res = ll_parse_c_type(info, p_input)
- finally:
- rffi.free_charp(p_input)
return rffi.cast(lltype.Signed, res)
NULL_CTX = lltype.nullptr(PCTX.TO)
@@ -130,15 +127,13 @@
return rffi.getintfield(src_ctx, 'c_num_types')
def search_in_globals(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_globals(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_globals(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
def search_in_struct_unions(ctx, name):
- c_name = rffi.str2charp(name)
- result = ll_search_in_struct_unions(ctx, c_name,
- rffi.cast(rffi.SIZE_T, len(name)))
- rffi.free_charp(c_name)
+ with rffi.scoped_view_charp(name) as c_name:
+ result = ll_search_in_struct_unions(ctx, c_name,
+ rffi.cast(rffi.SIZE_T, len(name)))
return rffi.cast(lltype.Signed, result)
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.7.0", ("This test_c.py file is for testing a version"
+assert __version__ == "1.8.0", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
@@ -3330,13 +3330,18 @@
BChar = new_primitive_type("char")
BCharP = new_pointer_type(BChar)
BCharA = new_array_type(BCharP, None)
- py.test.raises(TypeError, from_buffer, BCharA, b"foo")
+ p1 = from_buffer(BCharA, b"foo")
+ assert p1 == from_buffer(BCharA, b"foo")
+ import gc; gc.collect()
+ assert p1 == from_buffer(BCharA, b"foo")
py.test.raises(TypeError, from_buffer, BCharA, u+"foo")
try:
from __builtin__ import buffer
except ImportError:
pass
else:
+ # from_buffer(buffer(b"foo")) does not work, because it's not
+ # implemented on pypy; only from_buffer(b"foo") works.
py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo"))
py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo"))
try:
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -402,21 +402,20 @@
_WriteFile, ERROR_NO_SYSTEM_RESOURCES)
from rpython.rlib import rwin32
- charp = rffi.str2charp(buf)
- written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
- flavor='raw')
- try:
- result = _WriteFile(
- self.handle, rffi.ptradd(charp, offset),
- size, written_ptr, rffi.NULL)
+ with rffi.scoped_view_charp(buf) as charp:
+ written_ptr = lltype.malloc(rffi.CArrayPtr(rwin32.DWORD).TO, 1,
+ flavor='raw')
+ try:
+ result = _WriteFile(
+ self.handle, rffi.ptradd(charp, offset),
+ size, written_ptr, rffi.NULL)
- if (result == 0 and
- rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
- raise oefmt(space.w_ValueError,
- "Cannot send %d bytes over connection", size)
- finally:
- rffi.free_charp(charp)
- lltype.free(written_ptr, flavor='raw')
+ if (result == 0 and
+ rwin32.GetLastError_saved() == ERROR_NO_SYSTEM_RESOURCES):
+ raise oefmt(space.w_ValueError,
+ "Cannot send %d bytes over connection", size)
+ finally:
+ lltype.free(written_ptr, flavor='raw')
def do_recv_string(self, space, buflength, maxlength):
from pypy.module._multiprocessing.interp_win32 import (
diff --git a/pypy/module/_posixsubprocess/interp_subprocess.py b/pypy/module/_posixsubprocess/interp_subprocess.py
--- a/pypy/module/_posixsubprocess/interp_subprocess.py
+++ b/pypy/module/_posixsubprocess/interp_subprocess.py
@@ -15,8 +15,9 @@
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
- includes=['unistd.h', 'sys/syscall.h'])
+ includes=['unistd.h', 'sys/syscall.h', 'sys/stat.h'])
HAVE_SYS_SYSCALL_H = platform.Has("syscall")
+ HAVE_SYS_STAT_H = platform.Has("stat")
HAVE_SETSID = platform.Has("setsid")
config = platform.configure(CConfig)
@@ -29,6 +30,8 @@
compile_extra = []
if config['HAVE_SYS_SYSCALL_H']:
compile_extra.append("-DHAVE_SYS_SYSCALL_H")
+if config['HAVE_SYS_STAT_H']:
+ compile_extra.append("-DHAVE_SYS_STAT_H")
if config['HAVE_SETSID']:
compile_extra.append("-DHAVE_SETSID")
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -1,6 +1,7 @@
import sys, os
-import py
+import pytest
from pypy.tool.pytest.objspace import gettestobjspace
+from pypy.interpreter.gateway import interp2app
from rpython.tool.udir import udir
from rpython.rlib import rsocket
from rpython.rtyper.lltypesystem import lltype, rffi
@@ -13,8 +14,6 @@
mod.w_socket = space.appexec([], "(): import _socket as m; return m")
mod.path = udir.join('fd')
mod.path.write('fo')
- mod.raises = py.test.raises # make raises available from app-level tests
- mod.skip = py.test.skip
def test_gethostname():
host = space.appexec([w_socket], "(_socket): return _socket.gethostname()")
@@ -42,7 +41,7 @@
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and not ipv6:
from pypy.interpreter.error import OperationError
- with py.test.raises(OperationError):
+ with pytest.raises(OperationError):
space.appexec([w_socket, space.wrap(host)],
"(_socket, host): return _socket.gethostbyaddr(host)")
continue
@@ -58,14 +57,14 @@
assert space.unwrap(port) == 25
# 1 arg version
if sys.version_info < (2, 4):
- py.test.skip("getservbyname second argument is not optional before python 2.4")
+ pytest.skip("getservbyname second argument is not optional before python 2.4")
port = space.appexec([w_socket, space.wrap(name)],
"(_socket, name): return _socket.getservbyname(name)")
assert space.unwrap(port) == 25
def test_getservbyport():
if sys.version_info < (2, 4):
- py.test.skip("getservbyport does not exist before python 2.4")
+ pytest.skip("getservbyport does not exist before python 2.4")
port = 25
# 2 args version
name = space.appexec([w_socket, space.wrap(port)],
@@ -139,7 +138,7 @@
def test_pton_ntop_ipv4():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
tests = [
("123.45.67.89", "\x7b\x2d\x43\x59"),
("0.0.0.0", "\x00" * 4),
@@ -155,9 +154,9 @@
def test_ntop_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -176,9 +175,9 @@
def test_pton_ipv6():
if not hasattr(socket, 'inet_pton'):
- py.test.skip('No socket.inet_pton on this platform')
+ pytest.skip('No socket.inet_pton on this platform')
if not socket.has_ipv6:
- py.test.skip("No IPv6 on this platform")
+ pytest.skip("No IPv6 on this platform")
tests = [
("\x00" * 16, "::"),
("\x01" * 16, ":".join(["101"] * 8)),
@@ -197,7 +196,7 @@
assert space.unwrap(w_packed) == packed
def test_has_ipv6():
- py.test.skip("has_ipv6 is always True on PyPy for now")
+ pytest.skip("has_ipv6 is always True on PyPy for now")
res = space.appexec([w_socket], "(_socket): return _socket.has_ipv6")
assert space.unwrap(res) == socket.has_ipv6
@@ -231,7 +230,7 @@
def test_addr_raw_packet():
from pypy.module._socket.interp_socket import addr_as_object
if not hasattr(rsocket._c, 'sockaddr_ll'):
- py.test.skip("posix specific test")
+ pytest.skip("posix specific test")
# HACK: To get the correct interface number of lo, which in most cases is 1,
# but can be anything (i.e. 39), we need to call the libc function
# if_nametoindex to get the correct index
@@ -653,11 +652,11 @@
class AppTestNetlink:
def setup_class(cls):
if not hasattr(os, 'getpid'):
- py.test.skip("AF_NETLINK needs os.getpid()")
+ pytest.skip("AF_NETLINK needs os.getpid()")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_NETLINK')")
if not space.is_true(w_ok):
- py.test.skip("no AF_NETLINK on this platform")
+ pytest.skip("no AF_NETLINK on this platform")
cls.space = space
def test_connect_to_kernel_netlink_routing_socket(self):
@@ -673,11 +672,11 @@
class AppTestPacket:
def setup_class(cls):
if not hasattr(os, 'getuid') or os.getuid() != 0:
- py.test.skip("AF_PACKET needs to be root for testing")
+ pytest.skip("AF_PACKET needs to be root for testing")
w_ok = space.appexec([], "(): import _socket; " +
"return hasattr(_socket, 'AF_PACKET')")
if not space.is_true(w_ok):
- py.test.skip("no AF_PACKET on this platform")
+ pytest.skip("no AF_PACKET on this platform")
cls.space = space
def test_convert_between_tuple_and_sockaddr_ll(self):
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -146,7 +146,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
NPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
# set both server and client callbacks, because the context
@@ -158,7 +158,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def advertiseNPN_cb(s, data_ptr, len_ptr, args):
@@ -192,7 +192,7 @@
def __init__(self, ctx, protos):
self.protos = protos
- self.buf, self.pinned, self.is_raw = rffi.get_nonmovingbuffer(protos)
+ self.buf, self.bufflag = rffi.get_nonmovingbuffer(protos)
ALPN_STORAGE.set(rffi.cast(lltype.Unsigned, self.buf), self)
with rffi.scoped_str2charp(protos) as protos_buf:
@@ -204,7 +204,7 @@
def __del__(self):
rffi.free_nonmovingbuffer(
- self.protos, self.buf, self.pinned, self.is_raw)
+ self.protos, self.buf, self.bufflag)
@staticmethod
def selectALPN_cb(s, out_ptr, outlen_ptr, client, client_len, args):
@@ -239,7 +239,7 @@
Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
bound on the entropy contained in string."""
- with rffi.scoped_str2charp(string) as buf:
+ with rffi.scoped_nonmovingbuffer(string) as buf:
libssl_RAND_add(buf, len(string), entropy)
def _RAND_bytes(space, n, pseudo):
diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py
--- a/pypy/module/cppyy/capi/builtin_capi.py
+++ b/pypy/module/cppyy/capi/builtin_capi.py
@@ -537,9 +537,8 @@
releasegil=ts_helper,
compilation_info=backend.eci)
def c_charp2stdstring(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2stdstring(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2stdstring(charp)
return result
_c_stdstring2stdstring = rffi.llexternal(
"cppyy_stdstring2stdstring",
diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py
--- a/pypy/module/cppyy/capi/cint_capi.py
+++ b/pypy/module/cppyy/capi/cint_capi.py
@@ -82,9 +82,8 @@
releasegil=ts_helper,
compilation_info=eci)
def c_charp2TString(space, svalue):
- charp = rffi.str2charp(svalue)
- result = _c_charp2TString(charp)
- rffi.free_charp(charp)
+ with rffi.scoped_view_charp(svalue) as charp:
+ result = _c_charp2TString(charp)
return result
_c_TString2TString = rffi.llexternal(
"cppyy_TString2TString",
diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py
--- a/pypy/module/cppyy/capi/loadable_capi.py
+++ b/pypy/module/cppyy/capi/loadable_capi.py
@@ -65,6 +65,7 @@
else: # only other use is sring
n = len(obj._string)
assert raw_string == rffi.cast(rffi.CCHARP, 0)
+ # XXX could use rffi.get_nonmovingbuffer_final_null()
raw_string = rffi.str2charp(obj._string)
data = rffi.cast(rffi.CCHARPP, data)
data[0] = raw_string
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -11,6 +11,9 @@
from rpython.rtyper.annlowlevel import llhelper
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
from rpython.rlib.objectmodel import dont_inline
+from rpython.rlib.rfile import (FILEP, c_fread, c_fclose, c_fwrite,
+ c_fdopen, c_fileno,
+ c_fopen)# for tests
from rpython.translator import cdir
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.gensupp import NameManager
@@ -84,44 +87,32 @@
assert CONST_WSTRING == rffi.CWCHARP
# FILE* interface
-FILEP = rffi.COpaquePtr('FILE')
if sys.platform == 'win32':
dash = '_'
else:
dash = ''
-fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT)
-fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
-fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING],
- FILEP, save_err=rffi.RFFI_SAVE_ERRNO)
-_fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
def fclose(fp):
- if not is_valid_fd(fileno(fp)):
+ if not is_valid_fd(c_fileno(fp)):
return -1
- return _fclose(fp)
+ return c_fclose(fp)
-_fwrite = rffi.llexternal('fwrite',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fwrite(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fwrite(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fwrite(buf, sz, n, fp)
-_fread = rffi.llexternal('fread',
- [rffi.VOIDP, rffi.SIZE_T, rffi.SIZE_T, FILEP],
- rffi.SIZE_T)
def fread(buf, sz, n, fp):
- validate_fd(fileno(fp))
- return _fread(buf, sz, n, fp)
+ validate_fd(c_fileno(fp))
+ return c_fread(buf, sz, n, fp)
_feof = rffi.llexternal('feof', [FILEP], rffi.INT)
def feof(fp):
- validate_fd(fileno(fp))
+ validate_fd(c_fileno(fp))
return _feof(fp)
def is_valid_fp(fp):
- return is_valid_fd(fileno(fp))
+ return is_valid_fd(c_fileno(fp))
pypy_decl = 'pypy_decl.h'
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -96,7 +96,8 @@
raise oefmt(space.w_ValueError,
"bytes_attach called on object with ob_size %d but trying to store %d",
py_str.c_ob_size, len(s))
- rffi.c_memcpy(py_str.c_ob_sval, rffi.str2charp(s), len(s))
+ with rffi.scoped_nonmovingbuffer(s) as s_ptr:
+ rffi.c_memcpy(py_str.c_ob_sval, s_ptr, len(s))
py_str.c_ob_sval[len(s)] = '\0'
py_str.c_ob_shash = space.hash_w(w_obj)
py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
diff --git a/pypy/module/cpyext/c-api.txt b/pypy/module/cpyext/c-api.txt
deleted file mode 100644
--- a/pypy/module/cpyext/c-api.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-Reference Count
-===============
-
-XXX
-
-Borrowed References
-===================
-
-XXX
-
-PyStringObject support
-======================
-
-The problem
------------
-
-PyString_AsString() returns a (non-movable) pointer to the underlying
-buffer, whereas pypy strings are movable. C code may temporarily
-store this address and use it, as long as it owns a reference to the
-PyObject. There is no "release" function to specify that the pointer
-is not needed any more.
-
-Note that the pointer may be used to fill the initial value of
-string. This is valid only when the string was just allocated, and is
-not used elsewhere.
-
-Proposed solution
------------------
-
-Our emulation of the PyStringObject contains an additional member: a
-pointer to a char buffer; it may be NULL.
-
-- A string allocated by pypy will be converted into a PyStringObject
- with a NULL buffer. When PyString_AsString() is called, memory is
- allocated (with flavor='raw') and content is copied.
-
-- A string allocated with PyString_FromStringAndSize(NULL, size) will
- allocate a buffer with the specified size, but the reference won't
- be stored in the global map py_objects_r2w; there won't be a
- corresponding object in pypy. When from_ref() or Py_INCREF() is
- called, the pypy string is created, and added in py_objects_r2w.
- The buffer is then supposed to be immutable.
-
-- _PyString_Resize works only on not-yet-pypy'd strings, and returns a
- similar object.
-
-- PyString_Size don't need to force the object. (in this case, another
- "size" member is needed)
-
-- There could be an (expensive!) check in from_ref() that the buffer
- still corresponds to the pypy gc-managed string.
-
-PySequence_Fast support
-======================
-There are five functions for fast sequence access offered by the CPython API:
-
-PyObject* PySequence_Fast(PyObject *o, const char *m)
-
-PyObject* PySequence_Fast_GET_ITEM( PyObject *o, int i)
-
-PyObject** PySequence_Fast_ITEMS( PyObject *o)
-
-PyObject* PySequence_ITEM( PyObject *o, int i)
-
-int PySequence_Fast_GET_SIZE( PyObject *o)
-
-PyPy supports four of these, but does not support PySequence_Fast_ITEMS.
-(Various ways to support PySequence_Fast_ITEMS were considered. They all had
-two things in common: they would have taken a lot of work, and they would have
-resulted in incomplete semantics or in poor performance. We decided that a slow
-implementation of PySequence_Fast_ITEMS was not very useful.)
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -25,6 +25,8 @@
flavor='raw',
add_memory_pressure=True)
+realloc = rffi.llexternal('realloc', [rffi.VOIDP, rffi.SIZE_T], rffi.VOIDP)
+
@cpython_api([rffi.VOIDP, size_t], rffi.VOIDP)
def PyObject_Realloc(space, ptr, size):
if not lltype.cast_ptr_to_int(ptr):
@@ -32,7 +34,7 @@
flavor='raw',
add_memory_pressure=True)
# XXX FIXME
- return lltype.nullptr(rffi.VOIDP.TO)
+ return realloc(ptr, size)
@cpython_api([rffi.VOIDP], lltype.Void)
def PyObject_Free(space, ptr):
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -10,7 +10,7 @@
from pypy.objspace.std import tupleobject
from pypy.module.cpyext.tupleobject import PyTuple_Check, PyTuple_SetItem
-from pypy.module.cpyext.object import Py_IncRef, Py_DecRef
+from pypy.module.cpyext.pyobject import decref
from pypy.module.cpyext.dictobject import PyDict_Check
@@ -252,7 +252,7 @@
def setitem(self, w_list, index, w_obj):
storage = self.unerase(w_list.lstorage)
index = self._check_index(index, storage._length)
- Py_DecRef(w_list.space, storage._elems[index])
+ decref(w_list.space, storage._elems[index])
storage._elems[index] = make_ref(w_list.space, w_obj)
def length(self, w_list):
@@ -264,9 +264,8 @@
return storage._elems
def getslice(self, w_list, start, stop, step, length):
- #storage = self.unerase(w_list.lstorage)
- raise oefmt(w_list.space.w_NotImplementedError,
- "settting a slice of a PySequence_Fast is not supported")
+ w_list.switch_to_object_strategy()
+ return w_list.strategy.getslice(w_list, start, stop, step, length)
def getitems(self, w_list):
# called when switching list strategy, so convert storage
@@ -389,5 +388,5 @@
def __del__(self):
for i in range(self._length):
- Py_DecRef(self.space, self._elems[i])
+ decref(self.space, self._elems[i])
lltype.free(self._elems, flavor='raw')
diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py
--- a/pypy/module/cpyext/test/test_eval.py
+++ b/pypy/module/cpyext/test/test_eval.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.eval import (
Py_single_input, Py_file_input, Py_eval_input, PyCompilerFlags)
-from pypy.module.cpyext.api import fopen, fclose, fileno, Py_ssize_tP
+from pypy.module.cpyext.api import c_fopen, c_fclose, c_fileno, Py_ssize_tP
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.astcompiler import consts
from rpython.tool.udir import udir
@@ -130,19 +130,19 @@
def test_run_file(self, space, api):
filepath = udir / "cpyext_test_runfile.py"
filepath.write("raise ZeroDivisionError")
- fp = fopen(str(filepath), "rb")
+ fp = c_fopen(str(filepath), "rb")
filename = rffi.str2charp(str(filepath))
w_globals = w_locals = space.newdict()
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_ZeroDivisionError
api.PyErr_Clear()
# try again, but with a closed file
- fp = fopen(str(filepath), "rb")
- os.close(fileno(fp))
+ fp = c_fopen(str(filepath), "rb")
+ os.close(c_fileno(fp))
api.PyRun_File(fp, filename, Py_file_input, w_globals, w_locals)
- fclose(fp)
+ c_fclose(fp)
assert api.PyErr_Occurred() is space.w_IOError
api.PyErr_Clear()
diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py
--- a/pypy/module/cpyext/test/test_object.py
+++ b/pypy/module/cpyext/test/test_object.py
@@ -212,8 +212,9 @@
assert type(x) is float
assert x == -12.34
- @pytest.mark.skipif(True, reason='realloc not fully implemented')
def test_object_realloc(self):
+ if not self.runappdirect:
+ skip('no untranslated support for realloc')
module = self.import_extension('foo', [
("realloctest", "METH_NOARGS",
"""
@@ -221,12 +222,11 @@
char *copy, *orig = PyObject_MALLOC(12);
memcpy(orig, "hello world", 12);
copy = PyObject_REALLOC(orig, 15);
+ /* realloc() takes care of freeing orig, if changed */
if (copy == NULL)
Py_RETURN_NONE;
ret = PyBytes_FromStringAndSize(copy, 12);
- if (copy != orig)
- PyObject_Free(copy);
- PyObject_Free(orig);
+ PyObject_Free(copy);
return ret;
""")])
x = module.realloctest()
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py
--- a/pypy/module/cpyext/test/test_sequence.py
+++ b/pypy/module/cpyext/test/test_sequence.py
@@ -78,6 +78,17 @@
assert api.PySequence_SetSlice(w_t, 1, 1, space.wrap((3,))) == 0
assert space.eq_w(w_t, space.wrap([1, 3, 5]))
+ def test_get_slice_fast(self, space, api):
+ w_t = space.wrap([1, 2, 3, 4, 5])
+ api.PySequence_Fast(w_t, "foo") # converts
+ assert space.unwrap(api.PySequence_GetSlice(w_t, 2, 4)) == [3, 4]
+ assert space.unwrap(api.PySequence_GetSlice(w_t, 1, -1)) == [2, 3, 4]
+
+ assert api.PySequence_DelSlice(w_t, 1, 4) == 0
+ assert space.eq_w(w_t, space.wrap([1, 5]))
+ assert api.PySequence_SetSlice(w_t, 1, 1, space.wrap((3,))) == 0
+ assert space.eq_w(w_t, space.wrap([1, 3, 5]))
+
def test_iter(self, space, api):
w_t = space.wrap((1, 2))
w_iter = api.PySeqIter_New(w_t)
@@ -226,18 +237,33 @@
assert space.int_w(space.len(w_l)) == 10
-class XAppTestSequenceObject(AppTestCpythonExtensionBase):
- def test_sequenceobject(self):
+class AppTestSequenceObject(AppTestCpythonExtensionBase):
+ def test_fast(self):
module = self.import_extension('foo', [
("test_fast_sequence", "METH_VARARGS",
"""
- PyObject * o = PyTuple_GetItem(args, 0);
+ int size, i;
+ PyTypeObject * common_type;
+ PyObject *foo, **objects;
+ PyObject * seq = PyTuple_GetItem(args, 0);
/* XXX assert it is a tuple */
- PyObject *foo = PySequence_Fast(o, "some string");
- PyObject ** res = PySequence_Fast_ITEMS(foo);
- /* XXX do some kind of test on res */
- /* XXX now what? who manages res's refcount? */
+ if (seq == NULL)
+ Py_RETURN_NONE;
+ foo = PySequence_Fast(seq, "some string");
+ objects = PySequence_Fast_ITEMS(foo);
+ size = PySequence_Fast_GET_SIZE(seq);
+ common_type = size > 0 ? Py_TYPE(objects[0]) : NULL;
+ for (i = 1; i < size; ++i) {
+ if (Py_TYPE(objects[i]) != common_type) {
+ common_type = NULL;
+ break;
+ }
+ }
+ Py_DECREF(foo);
+ Py_DECREF(common_type);
return PyBool_FromLong(1);
""")])
- assert module.test_fast_sequence([1, 2, 3, 4])
+ s = [1, 2, 3, 4]
+ assert module.test_fast_sequence(s[0:-1])
+ assert module.test_fast_sequence(s[::-1])
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -47,6 +47,7 @@
jl.MP_SCOPE, jl.MP_INDEX, jl.MP_OPCODE)
def get_location(next_instr, is_being_profiled, bytecode):
from pypy.tool.stdlib_opcode import opcode_method_names
+ from rpython.tool.error import offset2lineno
bcindex = ord(bytecode.co_code[next_instr])
opname = ""
if 0 <= bcindex < len(opcode_method_names):
@@ -54,7 +55,8 @@
name = bytecode.co_name
if not name:
name = ""
- return (bytecode.co_filename, bytecode.co_firstlineno,
+ line = offset2lineno(bytecode, intmask(next_instr))
+ return (bytecode.co_filename, line,
name, intmask(next_instr), opname)
def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode):
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -19,6 +19,7 @@
self.defaultencoding = "utf-8"
self.filesystemencoding = None
self.debug = True
+ self.track_resources = False
self.dlopenflags = rdynload._dlopen_default_mode()
interpleveldefs = {
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ownlib.py
@@ -130,7 +130,7 @@
cls.module = str(udir.join('testownlib.dll'))
else:
subprocess.check_call(
- 'gcc testownlib.c -shared -fPIC -o testownlib.so',
+ 'cc testownlib.c -shared -fPIC -o testownlib.so',
cwd=str(udir), shell=True)
cls.module = str(udir.join('testownlib.so'))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -852,9 +852,12 @@
assert str(e2.value) == "foo0() takes no arguments (2 given)"
assert str(e3.value) == "foo1() takes exactly one argument (0 given)"
assert str(e4.value) == "foo1() takes exactly one argument (2 given)"
- assert str(e5.value) == "foo2() takes exactly 2 arguments (0 given)"
- assert str(e6.value) == "foo2() takes exactly 2 arguments (1 given)"
- assert str(e7.value) == "foo2() takes exactly 2 arguments (3 given)"
+ assert str(e5.value) in ["foo2 expected 2 arguments, got 0",
+ "foo2() takes exactly 2 arguments (0 given)"]
+ assert str(e6.value) in ["foo2 expected 2 arguments, got 1",
+ "foo2() takes exactly 2 arguments (1 given)"]
+ assert str(e7.value) in ["foo2 expected 2 arguments, got 3",
+ "foo2() takes exactly 2 arguments (3 given)"]
def test_address_of_function():
ffi = FFI()
@@ -1916,3 +1919,47 @@
ffi.cdef("bool f(void);")
lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
assert lib.f() == 1
+
+def test_bool_in_cpp_2():
+ ffi = FFI()
+ ffi.cdef('int add(int a, int b);')
+ lib = verify(ffi, "test_bool_bug_cpp", '''
+ typedef bool _Bool; /* there is a Windows header with this line */
+ int add(int a, int b)
+ {
+ return a + b;
+ }''', source_extension='.cpp')
+ c = lib.add(2, 3)
+ assert c == 5
+
+def test_struct_field_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[2]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+ ffi = FFI()
+ ffi.cdef("struct a { struct b b[]; };")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_struct_field_opaque", "?")
+ assert str(e.value) == ("struct a: field 'a.b' is of an opaque"
+ " type (not declared in cdef())")
+
+def test_function_arg_opaque():
+ py.test.skip("can currently declare a function with an opaque struct "
+ "as argument, but AFAICT it's impossible to call it later")
+
+def test_function_returns_opaque():
+ ffi = FFI()
+ ffi.cdef("struct a foo(int);")
+ e = py.test.raises(TypeError, verify,
+ ffi, "test_function_returns_opaque", "?")
+ assert str(e.value) == ("function foo: 'struct a' is used as result type,"
+ " but is opaque")
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
@@ -133,6 +133,12 @@
# You cannot assing character format codes as restype any longer
raises(TypeError, setattr, f, "restype", "i")
+ def test_unicode_function_name(self):
+ f = dll[u'_testfunc_i_bhilfd']
+ f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
+ f.restype = c_int
+ result = f(1, 2, 3, 4, 5.0, 6.0)
+ assert result == 21
def test_truncate_python_longs(self):
f = dll._testfunc_i_bhilfd
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -159,7 +159,6 @@
libraries=rtime.libraries
)
CLOCKS_PER_SEC = platform.ConstantInteger("CLOCKS_PER_SEC")
- clock_t = platform.SimpleType("clock_t", rffi.ULONG)
has_gettimeofday = platform.Has('gettimeofday')
has_clock_gettime = platform.Has('clock_gettime')
CLOCK_PROF = platform.DefinedConstantInteger('CLOCK_PROF')
@@ -233,7 +232,6 @@
HAS_CLOCK_MONOTONIC = cConfig.CLOCK_MONOTONIC is not None
HAS_MONOTONIC = (_WIN or _MACOSX or
(HAS_CLOCK_GETTIME and (HAS_CLOCK_HIGHRES or HAS_CLOCK_MONOTONIC)))
-clock_t = cConfig.clock_t
tm = cConfig.tm
glob_buf = lltype.malloc(tm, flavor='raw', zero=True, immortal=True)
@@ -1030,7 +1028,10 @@
with lltype.scoped_alloc(rposix.TMS) as tms:
ret = rposix.c_times(tms)
if rffi.cast(lltype.Signed, ret) != -1:
- cpu_time = float(tms.c_tms_utime + tms.c_tms_stime)
+ cpu_time = float(rffi.cast(lltype.Signed,
+ tms.c_tms_utime) +
+ rffi.cast(lltype.Signed,
+ tms.c_tms_stime))
if w_info is not None:
_setinfo(space, w_info, "times()",
1.0 / rposix.CLOCK_TICKS_PER_SECOND,
@@ -1038,7 +1039,7 @@
return space.wrap(cpu_time / rposix.CLOCK_TICKS_PER_SECOND)
return clock(space)
-_clock = external('clock', [], clock_t)
+_clock = external('clock', [], rposix.CLOCK_T)
def clock(space, w_info=None):
"""clock() -> floating point number
@@ -1052,7 +1053,7 @@
pass
value = _clock()
# Is this casting correct?
- if value == rffi.cast(clock_t, -1):
+ if intmask(value) == intmask(rffi.cast(rposix.CLOCK_T, -1)):
raise oefmt(space.w_RuntimeError,
"the processor time used is not available or its value"
"cannot be represented")
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -930,6 +930,7 @@
abstractinst.p_recursive_isinstance_type_w(space, w_inst, w_obj))
def type_get_dict(space, w_cls):
+ w_cls = _check(space, w_cls)
from pypy.objspace.std.dictproxyobject import W_DictProxyObject
w_dict = w_cls.getdict(space)
if w_dict is None:
@@ -1287,7 +1288,8 @@
cycle.append(candidate)
cycle.reverse()
names = [cls.getname(space) for cls in cycle]
- raise OperationError(space.w_TypeError, space.wrap(
+ # Can't use oefmt() here, since names is a list of unicodes
+ raise OperationError(space.w_TypeError, space.newunicode(
u"cycle among base classes: " + u' < '.join(names)))
diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py
--- a/pypy/tool/pytest/objspace.py
+++ b/pypy/tool/pytest/objspace.py
@@ -143,3 +143,5 @@
def is_w(self, obj1, obj2):
return obj1 is obj2
+ def setitem(self, obj, key, value):
+ obj[key] = value
diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-# hypothesis is used for test generation on untranslated jit tests
+# hypothesis is used for test generation on untranslated tests
hypothesis
enum34>=1.1.2
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py
--- a/rpython/annotator/binaryop.py
+++ b/rpython/annotator/binaryop.py
@@ -401,6 +401,9 @@
class __extend__(pairtype(SomeString, SomeTuple),
pairtype(SomeUnicodeString, SomeTuple)):
def mod((s_string, s_tuple)):
+ if not s_string.is_constant():
+ raise AnnotatorError("string formatting requires a constant "
+ "string/unicode on the left of '%'")
is_string = isinstance(s_string, SomeString)
is_unicode = isinstance(s_string, SomeUnicodeString)
assert is_string or is_unicode
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4623,6 +4623,14 @@
a = self.RPythonAnnotator()
a.build_types(main, [int])
+ def test_string_mod_nonconstant(self):
+ def f(x):
+ return x % 5
+ a = self.RPythonAnnotator()
+ e = py.test.raises(AnnotatorError, a.build_types, f, [str])
+ assert ('string formatting requires a constant string/unicode'
+ in str(e.value))
+
def g(n):
return [0, 1, 2, n]
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
--- a/rpython/jit/backend/arm/opassembler.py
+++ b/rpython/jit/backend/arm/opassembler.py
@@ -883,6 +883,7 @@
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ ofs_items -= 1 # for the extra null character
scale = 0
self._gen_address(resloc, baseloc, ofsloc, scale, ofs_items)
diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py
--- a/rpython/jit/backend/llsupport/descr.py
+++ b/rpython/jit/backend/llsupport/descr.py
@@ -280,7 +280,7 @@
concrete_type = '\x00'
def __init__(self, basesize, itemsize, lendescr, flag, is_pure=False, concrete_type='\x00'):
- self.basesize = basesize
+ self.basesize = basesize # this includes +1 for STR
self.itemsize = itemsize
self.lendescr = lendescr # or None, if no length
self.flag = flag
@@ -676,7 +676,7 @@
def unpack_arraydescr(arraydescr):
assert isinstance(arraydescr, ArrayDescr)
- ofs = arraydescr.basesize
+ ofs = arraydescr.basesize # this includes +1 for STR
size = arraydescr.itemsize
sign = arraydescr.is_item_signed()
return size, ofs, sign
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -293,6 +293,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1),
itemsize, itemsize, basesize, NOT_SIGNED)
elif opnum == rop.UNICODEGETITEM:
@@ -304,6 +305,7 @@
basesize, itemsize, ofs_length = get_array_token(rstr.STR,
self.cpu.translate_support_code)
assert itemsize == 1
+ basesize -= 1 # for the extra null character
self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2),
itemsize, itemsize, basesize)
elif opnum == rop.UNICODESETITEM:
diff --git a/rpython/jit/backend/llsupport/symbolic.py b/rpython/jit/backend/llsupport/symbolic.py
--- a/rpython/jit/backend/llsupport/symbolic.py
+++ b/rpython/jit/backend/llsupport/symbolic.py
@@ -29,7 +29,7 @@
def get_array_token(T, translate_support_code):
# T can be an array or a var-sized structure
if translate_support_code:
- basesize = llmemory.sizeof(T, 0)
+ basesize = llmemory.sizeof(T, 0) # this includes +1 for STR
if isinstance(T, lltype.Struct):
SUBARRAY = getattr(T, T._arrayfld)
itemsize = llmemory.sizeof(SUBARRAY.OF)
@@ -57,6 +57,7 @@
assert carray.length.size == WORD
ofs_length = before_array_part + carray.length.offset
basesize = before_array_part + carray.items.offset
+ basesize += T._hints.get('extra_item_after_alloc', 0) # +1 for STR
carrayitem = ll2ctypes.get_ctypes_type(T.OF)
itemsize = ctypes.sizeof(carrayitem)
return basesize, itemsize, ofs_length
diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py
--- a/rpython/jit/backend/llsupport/test/test_descr.py
+++ b/rpython/jit/backend/llsupport/test/test_descr.py
@@ -435,8 +435,10 @@
def test_bytearray_descr():
c0 = GcCache(False)
descr = get_array_descr(c0, rstr.STR) # for bytearray
+ # note that we get a basesize that has 1 extra byte for the final null char
+ # (only for STR)
assert descr.flag == FLAG_UNSIGNED
- assert descr.basesize == struct.calcsize("PP") # hash, length
+ assert descr.basesize == struct.calcsize("PP") + 1 # hash, length, extra
assert descr.lendescr.offset == struct.calcsize("P") # hash
assert not descr.is_array_of_pointers()
diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py
--- a/rpython/jit/backend/llsupport/test/test_rewrite.py
+++ b/rpython/jit/backend/llsupport/test/test_rewrite.py
@@ -647,6 +647,9 @@
""")
def test_rewrite_assembler_newstr_newunicode(self):
+ # note: strdescr.basesize already contains the extra final character,
+ # so that's why newstr(14) is rounded up to 'basesize+15' and not
+ # 'basesize+16'.
self.check_rewrite("""
[i2]
p0 = newstr(14)
@@ -657,12 +660,12 @@
""", """
[i2]
p0 = call_malloc_nursery( \
- %(strdescr.basesize + 16 * strdescr.itemsize + \
+ %(strdescr.basesize + 15 * strdescr.itemsize + \
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s)
gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s)
gc_store(p0, 0, 0, %(strhashdescr.field_size)s)
- p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
+ p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 15 * strdescr.itemsize)d)
gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s)
gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s)
gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s)
@@ -1240,14 +1243,14 @@
# 'i3 = gc_load_i(p0,i5,%(unicodedescr.itemsize)d)'],
[True, (4,), 'i3 = strgetitem(p0,i1)' '->'
'i3 = gc_load_indexed_i(p0,i1,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
#[False, (4,), 'i3 = strgetitem(p0,i1)' '->'
- # 'i5 = int_add(i1, %(strdescr.basesize)d);'
+ # 'i5 = int_add(i1, %(strdescr.basesize-1)d);'
# 'i3 = gc_load_i(p0,i5,1)'],
## setitem str/unicode
[True, (4,), 'i3 = strsetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,1,'
- '%(strdescr.basesize)d,1)'],
+ '%(strdescr.basesize-1)d,1)'],
[True, (2,4), 'i3 = unicodesetitem(p0,i1,0)' '->'
'i3 = gc_store_indexed(p0,i1,0,'
'%(unicodedescr.itemsize)d,'
diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py
--- a/rpython/jit/backend/llsupport/test/ztranslation_test.py
+++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py
@@ -3,7 +3,7 @@
from rpython.rlib.jit import JitDriver, unroll_parameters, set_param
from rpython.rlib.jit import PARAMETERS, dont_look_inside
from rpython.rlib.jit import promote, _get_virtualizable_token
-from rpython.rlib import jit_hooks, rposix
+from rpython.rlib import jit_hooks, rposix, rgc
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField
from rpython.jit.backend.detect_cpu import getcpuclass
@@ -11,7 +11,7 @@
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.config.config import ConfigError
from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rlib.rjitlog import rjitlog as jl
@@ -29,6 +29,7 @@
# - floats neg and abs
# - cast_int_to_float
# - llexternal with macro=True
+ # - extra place for the zero after STR instances
class BasicFrame(object):
_virtualizable_ = ['i']
@@ -56,7 +57,7 @@
return ("/home.py",0,0)
jitdriver = JitDriver(greens = [],
- reds = ['total', 'frame', 'j'],
+ reds = ['total', 'frame', 'prev_s', 'j'],
virtualizables = ['frame'],
get_location = get_location)
def f(i, j):
@@ -68,9 +69,12 @@
total = 0
frame = Frame(i)
j = float(j)
+ prev_s = rstr.mallocstr(16)
From pypy.commits at gmail.com Tue Aug 16 05:20:56 2016
From: pypy.commits at gmail.com (raffael_t)
Date: Tue, 16 Aug 2016 02:20:56 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-async: Fix async with test,
fixes timing for messages expected in assert
Message-ID: <57b2daf8.8aacc20a.ea595.9df1@mx.google.com>
Author: Raffael Tfirst
Branch: py3.5-async
Changeset: r86214:ce0987e21403
Date: 2016-08-16 11:20 +0200
http://bitbucket.org/pypy/pypy/changeset/ce0987e21403/
Log: Fix async with test, fixes timing for messages expected in assert
diff --git a/pypy/module/_asyncio/test/test_asyncio.py b/pypy/module/_asyncio/test/test_asyncio.py
--- a/pypy/module/_asyncio/test/test_asyncio.py
+++ b/pypy/module/_asyncio/test/test_asyncio.py
@@ -44,5 +44,11 @@
finally:
loop.close()
-assert cor.res == "- coro 1: waiting for lock - coro 1: holding the lock - coro 2: waiting for lock - coro 1: releasing the lock - coro 2: holding the lock - coro 2: releasing the lock -"
+assert "coro 1: waiting for lock" in cor.res
+assert "coro 1: holding the lock" in cor.res
+assert "coro 1: releasing the lock" in cor.res
+assert "coro 2: waiting for lock" in cor.res
+assert "coro 2: holding the lock" in cor.res
+assert "coro 2: releasing the lock" in cor.res
+assert cor.res.find("coro 1: releasing the lock") < cor.res.find("coro 2: holding the lock")
"""
From pypy.commits at gmail.com Tue Aug 16 05:56:28 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 16 Aug 2016 02:56:28 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-memoryview: hex method for bytes and
bytearray + test
Message-ID: <57b2e34c.0205c20a.a1731.ad04@mx.google.com>
Author: Richard Plangger
Branch: py3.5-memoryview
Changeset: r86215:75e21c6f7535
Date: 2016-08-16 11:55 +0200
http://bitbucket.org/pypy/pypy/changeset/75e21c6f7535/
Log: hex method for bytes and bytearray + test
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -441,6 +441,9 @@
def descr_copy(self, space):
return self._new(self.data[:])
+ def descr_hex(self, space):
+ return _array_to_hexstring(space, self.data, len(self.data), True)
+
# ____________________________________________________________
# helpers for slow paths, moved out because they contain loops
@@ -494,15 +497,22 @@
HEXDIGITS = "0123456789abcdef"
PY_SIZE_T_MAX = 2**(rffi.sizeof(rffi.SIZE_T)*8)-1
-def _array_to_hexstring(space, buf):
- length = buf.getlength()
+ at specialize.arg(3) # raw access
+def _array_to_hexstring(space, buf, len=0, rawaccess=False):
+ if rawaccess:
+ length = len
+ else:
+ length = buf.getlength()
hexstring = StringBuilder(length*2)
if length > PY_SIZE_T_MAX/2:
raise OperationError(space.w_MemoryError)
for i in range(length):
- byte = ord(buf.getitem(i))
+ if rawaccess:
+ byte = ord(buf[i])
+ else:
+ byte = ord(buf.getitem(i))
c = (byte >> 4 & 0xf)
hexstring.append(HEXDIGITS[c])
c = (byte & 0xf)
@@ -944,6 +954,12 @@
of the specified width. B is never truncated.
"""
+ def hex():
+ """B.hex() -> unicode
+ Return a string object containing two hexadecimal digits
+ for each byte in the instance B.
+ """
+
W_BytearrayObject.typedef = TypeDef(
"bytearray",
@@ -1093,6 +1109,8 @@
doc=BytearrayDocstrings.clear.__doc__),
copy = interp2app(W_BytearrayObject.descr_copy,
doc=BytearrayDocstrings.copy.__doc__),
+ hex = interp2app(W_BytearrayObject.descr_hex,
+ doc=BytearrayDocstrings.hex.__doc__),
)
W_BytearrayObject.typedef.flag_sequence_bug_compat = True
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -394,6 +394,12 @@
of the specified width. The string S is never truncated.
"""
+ def descr_hex(self, space):
+ """S.hex() -> string
+
+ Creates a hexadecimal string of the bytes object
+ """
+
class W_BytesObject(W_AbstractBytesObject):
import_from_mixin(StringMethods)
@@ -648,6 +654,11 @@
def descr_upper(self, space):
return W_BytesObject(self._value.upper())
+ def descr_hex(self, space):
+ from pypy.objspace.std.bytearrayobject import _array_to_hexstring
+ return _array_to_hexstring(space, self.buffer_w(space, space.BUF_SIMPLE))
+
+
def _create_list_from_bytes(value):
# need this helper function to allow the jit to look inside and inline
@@ -827,6 +838,7 @@
fromhex = interp2app(W_BytesObject.descr_fromhex, as_classmethod=True),
maketrans = interp2app(W_BytesObject.descr_maketrans, as_classmethod=True),
+ hex = interp2app(W_BytesObject.descr_hex)
)
W_BytesObject.typedef.flag_sequence_bug_compat = True
diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py
--- a/pypy/objspace/std/test/test_bytearrayobject.py
+++ b/pypy/objspace/std/test/test_bytearrayobject.py
@@ -523,3 +523,7 @@
result = bytearray.maketrans(b'abc', b'xyz')
assert result == table
assert type(result) is bytes
+
+ def test_hex(self):
+ assert bytearray(b'santa claus').hex() == "73616e746120636c617573"
+
diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py
--- a/pypy/objspace/std/test/test_bytesobject.py
+++ b/pypy/objspace/std/test/test_bytesobject.py
@@ -870,3 +870,10 @@
def __int__(self):
return 42
raises(TypeError, bytes, A())
+
+ def test_hex(self):
+ assert bytes('santa claus', 'ascii').hex() == "73616e746120636c617573"
+ assert bytes([0x73,0x61,0x6e,0x74,0x61,0x20,0x63,0x6c,0x61,0x75,0x73]).hex() == \
+ "73616e746120636c617573"
+ assert bytes(64).hex() == "00"*64
+
From pypy.commits at gmail.com Tue Aug 16 06:02:42 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 16 Aug 2016 03:02:42 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-memoryview: 32bit translation issue,
prebuilt long
Message-ID: <57b2e4c2.8bc71c0a.8c8f4.f1b0@mx.google.com>
Author: Richard Plangger
Branch: py3.5-memoryview
Changeset: r86216:42637c644eab
Date: 2016-08-16 12:01 +0200
http://bitbucket.org/pypy/pypy/changeset/42637c644eab/
Log: 32bit translation issue, prebuilt long
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -3,6 +3,7 @@
from rpython.rlib.objectmodel import (
import_from_mixin, newlist_hint, resizelist_hint, specialize)
from rpython.rlib.buffer import Buffer
+from rpython.rlib.rarithmetic import intmask
from rpython.rlib.rstring import StringBuilder, ByteListBuilder
from rpython.rlib.debug import check_list_of_chars
from rpython.rtyper.lltypesystem import rffi
@@ -495,7 +496,7 @@
return data
HEXDIGITS = "0123456789abcdef"
-PY_SIZE_T_MAX = 2**(rffi.sizeof(rffi.SIZE_T)*8)-1
+PY_SIZE_T_MAX = intmask(2**(rffi.sizeof(rffi.SIZE_T)*8)-1)
@specialize.arg(3) # raw access
def _array_to_hexstring(space, buf, len=0, rawaccess=False):
From pypy.commits at gmail.com Tue Aug 16 06:09:30 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 16 Aug 2016 03:09:30 -0700 (PDT)
Subject: [pypy-commit] pypy py3.5-memoryview: missing argument to operation
error
Message-ID: <57b2e65a.81a2c20a.2c7cc.b383@mx.google.com>
Author: Richard Plangger
Branch: py3.5-memoryview
Changeset: r86217:3045edf9288b
Date: 2016-08-16 12:05 +0200
http://bitbucket.org/pypy/pypy/changeset/3045edf9288b/
Log: missing argument to operation error
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -507,7 +507,7 @@
hexstring = StringBuilder(length*2)
if length > PY_SIZE_T_MAX/2:
- raise OperationError(space.w_MemoryError)
+ raise OperationError(space.w_MemoryError, space.w_None)
for i in range(length):
if rawaccess:
From pypy.commits at gmail.com Tue Aug 16 06:12:00 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 16 Aug 2016 03:12:00 -0700 (PDT)
Subject: [pypy-commit] pypy ppc-vsx-support: further implementing vec
unpack/pack on ppc
Message-ID: <57b2e6f0.85c11c0a.b917.ee10@mx.google.com>
Author: Richard Plangger
Branch: ppc-vsx-support
Changeset: r86218:d41babc2d791
Date: 2016-08-16 12:11 +0200
http://bitbucket.org/pypy/pypy/changeset/d41babc2d791/
Log: further implementing vec unpack/pack on ppc
diff --git a/rpython/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py
--- a/rpython/jit/backend/ppc/codebuilder.py
+++ b/rpython/jit/backend/ppc/codebuilder.py
@@ -660,7 +660,7 @@
# splat high of A, and high of B
xxspltdh = XX3_splat(60, XO13=10, OE=0, DM=0b11)
# generic splat
- xxspltd = XX3_splat(60, XO13=10, OE=0)
+ xxpermdi = XX3_splat(60, XO13=10, OE=0)
xxlxor = XX3(60, XO9=154)
xxlor = XX3(60, XO9=146)
diff --git a/rpython/jit/backend/ppc/vector_ext.py b/rpython/jit/backend/ppc/vector_ext.py
--- a/rpython/jit/backend/ppc/vector_ext.py
+++ b/rpython/jit/backend/ppc/vector_ext.py
@@ -21,6 +21,7 @@
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.jit.codewriter import longlong
from rpython.jit.backend.ppc.detect_feature import detect_vsx
+from rpython.rlib.objectmodel import always_inline
def not_implemented(msg):
msg = '[ppc/vector_ext] %s\n' % msg
@@ -28,6 +29,24 @@
llop.debug_print(lltype.Void, msg)
raise NotImplementedError(msg)
+ at always_inline
+def permi(v1, v2):
+ """ permute immediate for big and little endian """
+ # if v1 == 0 unpacks index 0 of param 1
+ # if v1 == 1 unpacks index 1 of param 1
+ # if v2 == 0 unpacks index 0 of param 2
+ # if v2 == 1 unpacks index 1 of param 2
+ mask = 0
+ if IS_BIG_ENDIAN:
+ not_implemented("no big endian support (yet)")
+ else:
+ if v1 == 0: mask |= 0b01
+ if v1 == 1: mask |= 0b00
+ if v2 == 0: mask |= 0b10
+ if v2 == 1: mask |= 0b00
+ return mask
+
+
def flush_vec_cc(asm, regalloc, condition, size, result_loc):
# After emitting an instruction that leaves a boolean result in
# a condition code (cc), call this. In the common case, result_loc
@@ -448,9 +467,12 @@
size = op.bytesize
if size == 8:
if resultloc.is_vector_reg(): # vector <- reg
- self.mc.load_imm(r.SCRATCH, PARAM_SAVE_AREA_OFFSET)
+ self.mc.load_imm(r.SCRATCH2, PARAM_SAVE_AREA_OFFSET)
self.mc.stvx(vector, r.SCRATCH2.value, r.SP.value)
- self.mc.store(src, r.SP.value, PARAM_SAVE_AREA_OFFSET+8*residx)
+ idx = residx
+ if not IS_BIG_ENDIAN:
+ idx = 1 - idx
+ self.mc.store(src, r.SP.value, PARAM_SAVE_AREA_OFFSET+8*idx)
self.mc.lvx(res, r.SCRATCH2.value, r.SP.value)
else:
not_implemented("64 bit float")
@@ -463,29 +485,32 @@
def emit_vec_unpack_i(self, op, arglocs, regalloc):
assert isinstance(op, VectorOp)
- resloc, srcloc, idxloc, countloc = arglocs
+ resloc, srcloc, idxloc, countloc, sizeloc = arglocs
idx = idxloc.value
res = resloc.value
src = srcloc.value
- size = op.bytesize
+ size = sizeloc.value
count = countloc.value
if count == 1:
assert srcloc.is_vector_reg()
assert not resloc.is_vector_reg()
off = PARAM_SAVE_AREA_OFFSET
self.mc.load_imm(r.SCRATCH2, off)
- off = off + size*idx
self.mc.stvx(src, r.SCRATCH2.value, r.SP.value)
+ off = off + size * idx
if size == 8:
self.mc.load(res, r.SP.value, off+size*idx)
+ return
elif size == 4:
self.mc.lwa(res, r.SP.value, off)
+ return
elif size == 2:
self.mc.lha(res, r.SP.value, off)
+ return
elif size == 1:
self.mc.lbz(res, r.SP.value, off)
self.mc.extsb(res, res)
- return
+ return
not_implemented("%d bit integer, count %d" % \
(size*8, count))
@@ -500,42 +525,44 @@
residx = residxloc.value
srcidx = srcidxloc.value
size = op.bytesize
- assert size == 8
# srcloc is always a floating point register f, this means it is
# vsr[0] == valueof(f)
if srcidx == 0:
if residx == 0:
- # r = (s[0], r[1])
- if IS_BIG_ENDIAN:
- self.mc.xxspltd(res, src, vec, 0b10)
- else:
- self.mc.xxspltd(res, src, vec, 0b01)
+ # r = (s[0], v[1])
+ self.mc.xxpermdi(res, src, vec, permi(0,1))
else:
assert residx == 1
- # r = (r[0], s[0])
- if IS_BIG_ENDIAN:
- self.mc.xxspltd(res, vec, src, 0b00)
- else:
- self.mc.xxspltd(res, vec, src, 0b11)
+ # r = (v[0], s[0])
+ self.mc.xxpermdi(res, vec, src, permi(1,1))
else:
assert srcidx == 1
if residx == 0:
- # r = (s[1], r[1])
- if IS_BIG_ENDIAN:
- self.mc.xxspltd(res, src, vec, 0b11)
- else:
- self.mc.xxspltd(res, src, vec, 0b00)
+ # r = (s[1], v[1])
+ self.mc.xxpermdi(res, src, vec, permi(1,1))
else:
assert residx == 1
- # r = (r[0], s[1])
- if IS_BIG_ENDIAN:
- self.mc.xxspltd(res, vec, src, 0b10)
- else:
- self.mc.xxspltd(res, vec, src, 0b01)
+ # r = (v[0], s[1])
+ self.mc.xxpermdi(res, vec, src, permi(0,1))
def emit_vec_unpack_f(self, op, arglocs, regalloc):
- resloc, srcloc, idxloc, countloc = arglocs
- self.emit_vec_pack_f(op, [resloc, srcloc, srcloc, imm(0), idxloc, countloc], regalloc)
+ assert isinstance(op, VectorOp)
+ resloc, srcloc, srcidxloc, countloc = arglocs
+ res = resloc.value
+ src = srcloc.value
+ srcidx = srcidxloc.value
+ size = op.bytesize
+ # srcloc is always a floating point register f, this means it is
+ # vsr[0] == valueof(f)
+ if srcidx == 0:
+ # r = (s[0], s[1])
+ self.mc.xxpermdi(res, src, src, permi(0,1))
+ return
+ else:
+ # r = (s[1], s[0])
+ self.mc.xxpermdi(res, src, src, permi(1,0))
+ return
+ not_implemented("unpack for combination src %d -> res %d" % (srcidx, residx))
def emit_vec_cast_float_to_int(self, op, arglocs, regalloc):
res, l0 = arglocs
@@ -700,7 +727,10 @@
assert not arg.is_vector()
srcloc = self.ensure_reg(arg)
vloc = self.ensure_vector_reg(op.getarg(0))
- resloc = self.force_allocate_vector_reg(op)
+ if op.is_vector():
+ resloc = self.force_allocate_vector_reg(op)
+ else:
+ resloc = self.force_allocate_reg(op)
residx = index.value # where to put it in result?
srcidx = 0
return [resloc, vloc, srcloc, imm(residx), imm(srcidx), imm(count.value)]
@@ -722,11 +752,13 @@
assert isinstance(count, ConstInt)
arg = op.getarg(0)
if arg.is_vector():
- srcloc = self.ensure_vector_reg(op.getarg(0))
+ srcloc = self.ensure_vector_reg(arg)
else:
- srcloc = self.ensure_reg(op.getarg(0))
+ # unpack
+ srcloc = self.ensure_reg(arg0)
+ size = arg.bytesize
resloc = self.force_allocate_reg(op)
- return [resloc, srcloc, imm(index.value), imm(count.value)]
+ return [resloc, srcloc, imm(index.value), imm(count.value), imm(size)]
def expand_float(self, size, box):
adr = self.assembler.datablockwrapper.malloc_aligned(16, 16)
diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py
--- a/rpython/jit/metainterp/test/test_vector.py
+++ b/rpython/jit/metainterp/test/test_vector.py
@@ -743,7 +743,7 @@
pack.append('%s = vec_%s()' % (v, suffix))
for i,val in enumerate(vals):
args_values.append(val)
- f = newvar('f')
+ f = newvar(suffix)
args.append(f)
count = 1
# create a new variable
@@ -789,6 +789,7 @@
#
looptoken = JitCellToken()
cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
+ import pdb; pdb.set_trace()
deadframe = cpu.execute_token(looptoken, *args_values)
print(source)
if float:
@@ -796,26 +797,30 @@
else:
return cpu.get_int_value(deadframe, 0)
- def test_unpack(self):
+ def test_unpack_f(self):
# double unpack
assert self.run_unpack("f{f} = vec_unpack_f({x}, 0, 1)",
- "[2xf64]", {'x': (1.2,-1)}) == 1.2
+ "[2xf64]", {'x': (1.2,-1.0)}) == 1.2
assert self.run_unpack("f{f} = vec_unpack_f({x}, 1, 1)",
"[2xf64]", {'x': (50.33,4321.0)}) == 4321.0
+ def test_unpack_i64(self):
# int64
assert self.run_unpack("i{i} = vec_unpack_i({x}, 0, 1)",
"[2xi64]", {'x': (11,12)}, float=False) == 11
assert self.run_unpack("i{i} = vec_unpack_i({x}, 1, 1)",
"[2xi64]", {'x': (14,15)}, float=False) == 15
- ## integer unpack (byte)
+ def test_unpack_i(self):
for i in range(16):
+ # i8
op = "i{i} = vec_unpack_i({x}, %d, 1)" % i
assert self.run_unpack(op, "[16xi8]", {'x': [127,1]*8}, float=False) == \
(127 if i%2==0 else 1)
+ # i16
if i < 8:
assert self.run_unpack(op, "[8xi16]", {'x': [2**15-1,0]*4}, float=False) == \
(2**15-1 if i%2==0 else 0)
+ # i32
if i < 4:
assert self.run_unpack(op, "[4xi32]", {'x': [2**31-1,0]*4}, float=False) == \
(2**31-1 if i%2==0 else 0)
From pypy.commits at gmail.com Tue Aug 16 06:56:35 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Tue, 16 Aug 2016 03:56:35 -0700 (PDT)
Subject: [pypy-commit] pypy default: encode the jitdrivers name in
start_trace, jitlog version bump
Message-ID: <57b2f163.497bc20a.13214.c014@mx.google.com>
Author: Richard Plangger
Branch:
Changeset: r86219:5847abf293f8
Date: 2016-08-04 16:32 +0200
http://bitbucket.org/pypy/pypy/changeset/5847abf293f8/
Log: encode the jitdrivers name in start_trace, jitlog version bump
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -1051,8 +1051,9 @@
metainterp_sd = metainterp.staticdata
jitdriver_sd = metainterp.jitdriver_sd
#
+ jd_name = jitdriver_sd.jitdriver.name
metainterp_sd.jitlog.start_new_trace(metainterp_sd,
- faildescr=resumekey, entry_bridge=False)
+ faildescr=resumekey, entry_bridge=False, jd_name=jd_name)
#
if isinstance(resumekey, ResumeAtPositionDescr):
inline_short_preamble = False
diff --git a/rpython/rlib/rjitlog/rjitlog.py b/rpython/rlib/rjitlog/rjitlog.py
--- a/rpython/rlib/rjitlog/rjitlog.py
+++ b/rpython/rlib/rjitlog/rjitlog.py
@@ -212,7 +212,7 @@
return method
return decor
-JITLOG_VERSION = 1
+JITLOG_VERSION = 2
JITLOG_VERSION_16BIT_LE = struct.pack("