[pypy-commit] pypy stm-gc: hg merge default
arigo
noreply at buildbot.pypy.org
Thu Apr 26 10:46:31 CEST 2012
Author: Armin Rigo <arigo at tunes.org>
Branch: stm-gc
Changeset: r54762:f7093e6634de
Date: 2012-04-26 10:45 +0200
http://bitbucket.org/pypy/pypy/changeset/f7093e6634de/
Log: hg merge default
diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py
--- a/lib_pypy/_ctypes_test.py
+++ b/lib_pypy/_ctypes_test.py
@@ -21,7 +21,7 @@
# Compile .c file
include_dir = os.path.join(thisdir, '..', 'include')
if sys.platform == 'win32':
- ccflags = []
+ ccflags = ['-D_CRT_SECURE_NO_WARNINGS']
else:
ccflags = ['-fPIC']
res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')],
@@ -34,6 +34,13 @@
if sys.platform == 'win32':
# XXX libpypy-c.lib is currently not installed automatically
library = os.path.join(thisdir, '..', 'include', 'libpypy-c')
+ if not os.path.exists(library + '.lib'):
+ #For a nightly build
+ library = os.path.join(thisdir, '..', 'include', 'python27')
+ if not os.path.exists(library + '.lib'):
+ # For a local translation
+ library = os.path.join(thisdir, '..', 'pypy', 'translator',
+ 'goal', 'libpypy-c')
libraries = [library, 'oleaut32']
extra_ldargs = ['/MANIFEST'] # needed for VC10
else:
diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py
--- a/lib_pypy/_testcapi.py
+++ b/lib_pypy/_testcapi.py
@@ -16,7 +16,7 @@
# Compile .c file
include_dir = os.path.join(thisdir, '..', 'include')
if sys.platform == 'win32':
- ccflags = []
+ ccflags = ['-D_CRT_SECURE_NO_WARNINGS']
else:
ccflags = ['-fPIC', '-Wimplicit-function-declaration']
res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')],
@@ -29,6 +29,13 @@
if sys.platform == 'win32':
# XXX libpypy-c.lib is currently not installed automatically
library = os.path.join(thisdir, '..', 'include', 'libpypy-c')
+ if not os.path.exists(library + '.lib'):
+ #For a nightly build
+ library = os.path.join(thisdir, '..', 'include', 'python27')
+ if not os.path.exists(library + '.lib'):
+ # For a local translation
+ library = os.path.join(thisdir, '..', 'pypy', 'translator',
+ 'goal', 'libpypy-c')
libraries = [library, 'oleaut32']
extra_ldargs = ['/MANIFEST', # needed for VC10
'/EXPORT:init_testcapi']
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/cppyy.rst
@@ -0,0 +1,554 @@
+============================
+cppyy: C++ bindings for PyPy
+============================
+
+The cppyy module provides C++ bindings for PyPy by using the reflection
+information extracted from C++ header files by means of the
+`Reflex package`_.
+For this to work, you have to both install Reflex and build PyPy from the
+reflex-support branch.
+As indicated by this being a branch, support for Reflex is still
+experimental.
+However, it is functional enough to put it in the hands of those who want
+to give it a try.
+In the medium term, cppyy will move away from Reflex and instead use
+`cling`_ as its backend, which is based on `llvm`_.
+Although that will change the logistics on the generation of reflection
+information, it will not change the python-side interface.
+
+.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex
+.. _`cling`: http://root.cern.ch/drupal/content/cling
+.. _`llvm`: http://llvm.org/
+
+
+Installation
+============
+
+For now, the easiest way of getting the latest version of Reflex is by
+installing the ROOT package.
+Besides getting the latest version of Reflex, another advantage is that with
+the full ROOT package, you can also use your Reflex-bound code on `CPython`_.
+`Download`_ a binary or install from `source`_.
+Some Linux and Mac systems may have ROOT provided in the list of scientific
+software of their packager.
+A current, standalone version of Reflex should be provided at some point,
+once the dependencies and general packaging have been thought out.
+Also, make sure you have a version of `gccxml`_ installed, which is most
+easily provided by the packager of your system.
+If you read up on gccxml, you'll probably notice that it is no longer being
+developed and hence will not provide C++11 support.
+That's why the medium term plan is to move to `cling`_.
+
+.. _`Download`: http://root.cern.ch/drupal/content/downloading-root
+.. _`source`: http://root.cern.ch/drupal/content/installing-root-source
+.. _`gccxml`: http://www.gccxml.org
+
+Next, get the `PyPy sources`_, select the reflex-support branch, and build
+pypy-c.
+For the build to succeed, the ``$ROOTSYS`` environment variable must point to
+the location of your ROOT installation::
+
+ $ hg clone https://bitbucket.org/pypy/pypy
+ $ cd pypy
+ $ hg up reflex-support
+ $ cd pypy/translator/goal
+ $ python translate.py -O jit --gcrootfinder=shadowstack targetpypystandalone.py --withmod-cppyy
+
+This will build a ``pypy-c`` that includes the cppyy module, and through that,
+Reflex support.
+Of course, if you already have a pre-built version of the ``pypy`` interpreter,
+you can use that for the translation rather than ``python``.
+
+.. _`PyPy sources`: https://bitbucket.org/pypy/pypy/overview
+
+
+Basic example
+=============
+
+Now test with a trivial example whether all packages are properly installed
+and functional.
+First, create a C++ header file with some class in it (note that all functions
+are made inline for convenience; a real-world example would of course have a
+corresponding source file)::
+
+ $ cat MyClass.h
+ class MyClass {
+ public:
+ MyClass(int i = -99) : m_myint(i) {}
+
+ int GetMyInt() { return m_myint; }
+ void SetMyInt(int i) { m_myint = i; }
+
+ public:
+ int m_myint;
+ };
+
+Then, generate the bindings using ``genreflex`` (part of ROOT), and compile the
+code::
+
+ $ genreflex MyClass.h
+ $ g++ -fPIC -rdynamic -O2 -shared -I$ROOTSYS/include MyClass_rflx.cpp -o libMyClassDict.so
+
+Now you're ready to use the bindings.
+Since the bindings are designed to look pythonistic, it should be
+straightforward::
+
+ $ pypy-c
+ >>>> import cppyy
+ >>>> cppyy.load_reflection_info("libMyClassDict.so")
+ <CPPLibrary object at 0xb6fd7c4c>
+ >>>> myinst = cppyy.gbl.MyClass(42)
+ >>>> print myinst.GetMyInt()
+ 42
+ >>>> myinst.SetMyInt(33)
+ >>>> print myinst.m_myint
+ 33
+ >>>> myinst.m_myint = 77
+ >>>> print myinst.GetMyInt()
+ 77
+ >>>> help(cppyy.gbl.MyClass) # shows that normal python introspection works
+
+That's all there is to it!
+
+
+Advanced example
+================
+The following snippet of C++ is very contrived, to allow showing that such
+pathological code can be handled and to show how certain features play out in
+practice::
+
+ $ cat MyAdvanced.h
+ #include <string>
+
+ class Base1 {
+ public:
+ Base1(int i) : m_i(i) {}
+ virtual ~Base1() {}
+ int m_i;
+ };
+
+ class Base2 {
+ public:
+ Base2(double d) : m_d(d) {}
+ virtual ~Base2() {}
+ double m_d;
+ };
+
+ class C;
+
+ class Derived : public virtual Base1, public virtual Base2 {
+ public:
+ Derived(const std::string& name, int i, double d) : Base1(i), Base2(d), m_name(name) {}
+ virtual C* gimeC() { return (C*)0; }
+ std::string m_name;
+ };
+
+ Base1* BaseFactory(const std::string& name, int i, double d) {
+ return new Derived(name, i, d);
+ }
+
+This code is still only in a header file, with all functions inline, for
+convenience of the example.
+If the implementations live in a separate source file or shared library, the
+only change needed is to link those in when building the reflection library.
+
+If you were to run ``genreflex`` like above in the basic example, you will
+find that not all classes of interest will be reflected, nor will be the
+global factory function.
+In particular, ``std::string`` will be missing, since it is not defined in
+this header file, but in a header file that is included.
+In practical terms, general classes such as ``std::string`` should live in a
+core reflection set, but for the moment assume we want to have it in the
+reflection library that we are building for this example.
+
+The ``genreflex`` script can be steered using a so-called `selection file`_,
+which is a simple XML file specifying, either explicitly or by using a
+pattern, which classes, variables, namespaces, etc. to select from the given
+header file.
+With the aid of a selection file, a large project can be easily managed:
+simply ``#include`` all relevant headers into a single header file that is
+handed to ``genreflex``.
+Then, apply a selection file to pick up all the relevant classes.
+For our purposes, the following rather straightforward selection will do
+(the name ``lcgdict`` for the root is historical, but required)::
+
+ $ cat MyAdvanced.xml
+ <lcgdict>
+ <class pattern="Base?" />
+ <class name="Derived" />
+ <class name="std::string" />
+ <function name="BaseFactory" />
+ </lcgdict>
+
+.. _`selection file`: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+
+Now the reflection info can be generated and compiled::
+
+ $ genreflex MyAdvanced.h --selection=MyAdvanced.xml
+ $ g++ -fPIC -rdynamic -O2 -shared -I$ROOTSYS/include MyAdvanced_rflx.cpp -o libAdvExDict.so
+
+and subsequently be used from PyPy::
+
+ >>>> import cppyy
+ >>>> cppyy.load_reflection_info("libAdvExDict.so")
+ <CPPLibrary object at 0x00007fdb48fc8120>
+ >>>> d = cppyy.gbl.BaseFactory("name", 42, 3.14)
+ >>>> type(d)
+ <class '__main__.Derived'>
+ >>>> d.m_i
+ 42
+ >>>> d.m_d
+ 3.14
+ >>>> d.m_name == "name"
+ True
+ >>>>
+
+Again, that's all there is to it!
+
+A couple of things to note, though.
+If you look back at the C++ definition of the ``BaseFactory`` function,
+you will see that it declares the return type to be a ``Base1``, yet the
+bindings return an object of the actual type ``Derived``.
+This choice is made for a couple of reasons.
+First, it makes method dispatching easier: if bound objects are always their
+most derived type, then it is easy to calculate any offsets, if necessary.
+Second, it makes memory management easier: the combination of the type and
+the memory address uniquely identifies an object.
+That way, it can be recycled and object identity can be maintained if it is
+entered as a function argument into C++ and comes back to PyPy as a return
+value.
+Last, but not least, casting is decidedly unpythonistic.
+By always providing the most derived type known, casting becomes unnecessary.
+For example, the data member of ``Base2`` is simply directly available.
+Note also that the unreflected ``gimeC`` method of ``Derived`` does not
+preclude its use.
+It is only the ``gimeC`` method that is unusable as long as class ``C`` is
+unknown to the system.
+
+
+Features
+========
+
+The following is not meant to be an exhaustive list, since cppyy is still
+under active development.
+Furthermore, the intention is that every feature is as natural as possible on
+the python side, so if you find something missing in the list below, simply
+try it out.
+It is not always possible to provide exact mapping between python and C++
+(active memory management is one such case), but by and large, if the use of a
+feature does not strike you as obvious, it is more likely to simply be a bug.
+That is a strong statement to make, but also a worthy goal.
+
+* **abstract classes**: Are represented as python classes, since they are
+ needed to complete the inheritance hierarchies, but will raise an exception
+ if an attempt is made to instantiate from them.
+
+* **arrays**: Supported for builtin data types only, as used from module
+ ``array``.
+ Out-of-bounds checking is limited to those cases where the size is known at
+ compile time (and hence part of the reflection info).
+
+* **builtin data types**: Map onto the expected equivalent python types, with
+ the caveat that there may be size differences, and thus it is possible that
+ exceptions are raised if an overflow is detected.
+
+* **casting**: Is supposed to be unnecessary.
+ Object pointer returns from functions provide the most derived class known
+ in the hierarchy of the object being returned.
+ This is important to preserve object identity as well as to make casting,
+ a pure C++ feature after all, superfluous.
+
+* **classes and structs**: Get mapped onto python classes, where they can be
+ instantiated as expected.
+ If classes are inner classes or live in a namespace, their naming and
+ location will reflect that.
+
+* **data members**: Public data members are represented as python properties
+ and provide read and write access on instances as expected.
+
+* **default arguments**: C++ default arguments work as expected, but python
+ keywords are not supported.
+ It is technically possible to support keywords, but for the C++ interface,
+ the formal argument names have no meaning and are not considered part of the
+ API, hence it is not a good idea to use keywords.
+
+* **doc strings**: The doc string of a method or function contains the C++
+ arguments and return types of all overloads of that name, as applicable.
+
+* **enums**: Are translated as ints with no further checking.
+
+* **functions**: Work as expected and live in their appropriate namespace
+ (which can be the global one, ``cppyy.gbl``).
+
+* **inheritance**: All combinations of inheritance on the C++ side (single,
+ multiple, virtual) are supported in the binding.
+ However, new python classes can only use single inheritance from a bound C++
+ class.
+ Multiple inheritance would introduce two "this" pointers in the binding.
+ This is a current, not a fundamental, limitation.
+ The C++ side will not see any overridden methods on the python side, as
+ cross-inheritance is planned but not yet supported.
+
+* **methods**: Are represented as python methods and work as expected.
+ They are first class objects and can be bound to an instance.
+ Virtual C++ methods work as expected.
+ To select a specific virtual method, do like with normal python classes
+ that override methods: select it from the class that you need, rather than
+ calling the method on the instance.
+
+* **namespaces**: Are represented as python classes.
+ Namespaces are more open-ended than classes, so sometimes initial access may
+ result in updates as data and functions are looked up and constructed
+ lazily.
+ Thus the result of ``dir()`` on a namespace should not be relied upon: it
+ only shows the already accessed members. (TODO: to be fixed by implementing
+ __dir__.)
+ The global namespace is ``cppyy.gbl``.
+
+* **operator conversions**: If defined in the C++ class and a python
+ equivalent exists (i.e. all builtin integer and floating point types, as well
+ as ``bool``), it will map onto that python conversion.
+ Note that ``char*`` is mapped onto ``__str__``.
+
+* **operator overloads**: If defined in the C++ class and if a python
+ equivalent is available (not always the case, think e.g. of ``operator||``),
+ then they work as expected.
+ Special care needs to be taken for global operator overloads in C++: first,
+ make sure that they are actually reflected, especially for the global
+ overloads for ``operator==`` and ``operator!=`` of STL iterators in the case
+ of gcc.
+ Second, make sure that reflection info is loaded in the proper order.
+ I.e. that these global overloads are available before use.
+
+* **pointers**: For builtin data types, see arrays.
+ For objects, a pointer to an object and an object looks the same, unless
+ the pointer is a data member.
+ In that case, assigning to the data member will cause a copy of the pointer
+ and care should be taken about the object's life time.
+ If a pointer is a global variable, the C++ side can replace the underlying
+ object and the python side will immediately reflect that.
+
+* **static data members**: Are represented as python property objects on the
+ class and the meta-class.
+ Both read and write access is as expected.
+
+* **static methods**: Are represented as python's ``staticmethod`` objects
+ and can be called both from the class as well as from instances.
+
+* **strings**: The std::string class is considered a builtin C++ type and
+ mixes quite well with python's str.
+ Python's str can be passed where a ``const char*`` is expected, and an str
+ will be returned if the return type is ``const char*``.
+
+* **templated classes**: Are represented in a meta-class style in python.
+ This looks a little bit confusing, but conceptually is rather natural.
+ For example, given the class ``std::vector<int>``, the meta-class part would
+ be ``std.vector`` in python.
+ Then, to get the instantiation on ``int``, do ``std.vector(int)`` and to
+ create an instance of that class, do ``std.vector(int)()``.
+  Note that templates can be built up by handing actual types to the class
+ instantiation (as done in this vector example), or by passing in the list of
+ template arguments as a string.
+ The former is a lot easier to work with if you have template instantiations
+ using classes that themselves are templates (etc.) in the arguments.
+ All classes must already exist in the loaded reflection info.
+
+* **typedefs**: Are simple python references to the actual classes to which
+ they refer.
+
+* **unary operators**: Are supported if a python equivalent exists, and if the
+ operator is defined in the C++ class.
+
+You can always find more detailed examples and see the full set of supported
+features by looking at the tests in pypy/module/cppyy/test.
+
+If a feature or reflection info is missing, this is supposed to be handled
+gracefully.
+In fact, there are unit tests explicitly for this purpose (even as their use
+becomes less interesting over time, as the number of missing features
+decreases).
+Only when a missing feature is used, should there be an exception.
+For example, if no reflection info is available for a return type, then a
+class that has a method with that return type can still be used.
+Only that one specific method can not be used.
+
+
+Templates
+=========
+
+A bit of special care needs to be taken for the use of templates.
+For a templated class to be completely available, it must be guaranteed that
+said class is fully instantiated, and hence all executable C++ code is
+generated and compiled in.
+The easiest way to fulfill that guarantee is by explicit instantiation in the
+header file that is handed to ``genreflex``.
+The following example should make that clear::
+
+ $ cat MyTemplate.h
+ #include <vector>
+
+ class MyClass {
+ public:
+ MyClass(int i = -99) : m_i(i) {}
+ MyClass(const MyClass& s) : m_i(s.m_i) {}
+ MyClass& operator=(const MyClass& s) { m_i = s.m_i; return *this; }
+ ~MyClass() {}
+ int m_i;
+ };
+
+ template class std::vector<MyClass>;
+
+If you know for certain that all symbols will be linked in from other sources,
+you can also declare the explicit template instantiation ``extern``.
+
+Unfortunately, this is not enough for gcc.
+The iterators, if they are going to be used, need to be instantiated as well,
+as do the comparison operators on those iterators, as these live in an
+internal namespace, rather than in the iterator classes.
+One way to handle this, is to deal with this once in a macro, then reuse that
+macro for all ``vector`` classes.
+Thus, the header above needs this, instead of just the explicit instantiation
+of the ``vector<MyClass>``::
+
+ #define STLTYPES_EXPLICIT_INSTANTIATION_DECL(STLTYPE, TTYPE) \
+ template class std::STLTYPE< TTYPE >; \
+ template class __gnu_cxx::__normal_iterator<TTYPE*, std::STLTYPE< TTYPE > >; \
+ template class __gnu_cxx::__normal_iterator<const TTYPE*, std::STLTYPE< TTYPE > >;\
+ namespace __gnu_cxx { \
+ template bool operator==(const std::STLTYPE< TTYPE >::iterator&, \
+ const std::STLTYPE< TTYPE >::iterator&); \
+ template bool operator!=(const std::STLTYPE< TTYPE >::iterator&, \
+ const std::STLTYPE< TTYPE >::iterator&); \
+ }
+
+ STLTYPES_EXPLICIT_INSTANTIATION_DECL(vector, MyClass)
+
+Then, still for gcc, the selection file needs to contain the full hierarchy as
+well as the global overloads for comparisons for the iterators::
+
+ $ cat MyTemplate.xml
+ <lcgdict>
+ <class pattern="std::vector<*>" />
+ <class pattern="__gnu_cxx::__normal_iterator<*>" />
+ <class pattern="__gnu_cxx::new_allocator<*>" />
+ <class pattern="std::_Vector_base<*>" />
+ <class pattern="std::_Vector_base<*>::_Vector_impl" />
+ <class pattern="std::allocator<*>" />
+ <function name="__gnu_cxx::operator=="/>
+ <function name="__gnu_cxx::operator!="/>
+
+ <class name="MyClass" />
+ </lcgdict>
+
+Run the normal ``genreflex`` and compilation steps::
+
+    $ genreflex MyTemplate.h --selection=MyTemplate.xml
+ $ g++ -fPIC -rdynamic -O2 -shared -I$ROOTSYS/include MyTemplate_rflx.cpp -o libTemplateDict.so
+
+Note: this is a dirty corner that clearly could do with some automation,
+even if the macro already helps.
+Such automation is planned.
+In fact, in the cling world, the backend can perform the template
+instantiations and generate the reflection info on the fly, and none of the
+above will any longer be necessary.
+
+Subsequent use should be as expected.
+Note the meta-class style of "instantiating" the template::
+
+ >>>> import cppyy
+ >>>> cppyy.load_reflection_info("libTemplateDict.so")
+ >>>> std = cppyy.gbl.std
+ >>>> MyClass = cppyy.gbl.MyClass
+ >>>> v = std.vector(MyClass)()
+ >>>> v += [MyClass(1), MyClass(2), MyClass(3)]
+ >>>> for m in v:
+ .... print m.m_i,
+ ....
+ 1 2 3
+ >>>>
+
+Other templates work similarly.
+The arguments to the template instantiation can either be a string with the
+full list of arguments, or the explicit classes.
+The latter makes for easier code writing if the classes passed to the
+instantiation are themselves templates.
+
+
+The fast lane
+=============
+
+The following is an experimental feature of cppyy, and that makes it doubly
+experimental, so caveat emptor.
+With a slight modification of Reflex, it can provide function pointers for
+C++ methods, and hence allow PyPy to call those pointers directly, rather than
+calling C++ through a Reflex stub.
+This results in a rather significant speed-up.
+Mind you, the normal stub path is not exactly slow, so for now only use this
+out of curiosity or if you really need it.
+
+To install this patch of Reflex, locate the file genreflex-methptrgetter.patch
+in pypy/module/cppyy and apply it to the genreflex python scripts found in
+``$ROOTSYS/lib``::
+
+ $ cd $ROOTSYS/lib
+ $ patch -p2 < genreflex-methptrgetter.patch
+
+With this patch, ``genreflex`` will have grown the ``--with-methptrgetter``
+option.
+Use this option when running ``genreflex``, and add the
+``-Wno-pmf-conversions`` option to ``g++`` when compiling.
+The rest works the same way: the fast path will be used transparently (which
+also means that you can't actually find out whether it is in use, other than
+by running a micro-benchmark).
+
+
+CPython
+=======
+
+Most of the ideas in cppyy come originally from the `PyROOT`_ project.
+Although PyROOT does not support Reflex directly, it has an alter ego called
+"PyCintex" that, in a somewhat roundabout way, does.
+If you installed ROOT, rather than just Reflex, PyCintex should be available
+immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
+variable.
+
+.. _`PyROOT`: http://root.cern.ch/drupal/content/pyroot
+
+There are a couple of minor differences between PyCintex and cppyy, most to do
+with naming.
+The one that you will run into directly, is that PyCintex uses a function
+called ``loadDictionary`` rather than ``load_reflection_info``.
+The reason for this is that Reflex calls the shared libraries that contain
+reflection info "dictionaries."
+However, in python, the name `dictionary` already has a well-defined meaning,
+so a more descriptive name was chosen for cppyy.
+In addition, PyCintex requires that the names of the shared libraries so
+loaded start with "lib".
+The basic example above, rewritten for PyCintex thus goes like this::
+
+ $ python
+ >>> import PyCintex
+ >>> PyCintex.loadDictionary("libMyClassDict.so")
+ >>> myinst = PyCintex.gbl.MyClass(42)
+ >>> print myinst.GetMyInt()
+ 42
+ >>> myinst.SetMyInt(33)
+ >>> print myinst.m_myint
+ 33
+ >>> myinst.m_myint = 77
+ >>> print myinst.GetMyInt()
+ 77
+ >>> help(PyCintex.gbl.MyClass) # shows that normal python introspection works
+
+Other naming differences are such things as taking an address of an object.
+In PyCintex, this is done with ``AddressOf`` whereas in cppyy the choice was
+made to follow the naming as in ``ctypes`` and hence use ``addressof``
+(PyROOT/PyCintex predate ``ctypes`` by several years, and the ROOT project
+follows camel-case, hence the differences).
+
+Of course, this is python, so if any of the naming is not to your liking, all
+you have to do is provide a wrapper script that you import instead of
+importing the ``cppyy`` or ``PyCintex`` modules directly.
+In that wrapper script you can rename methods exactly the way you need it.
+
+In the cling world, all these differences will be resolved.
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -23,6 +23,8 @@
* Write them in RPython as mixedmodule_, using *rffi* as bindings.
+* Write them in C++ and bind them through Reflex_ (EXPERIMENTAL)
+
.. _ctypes: #CTypes
.. _\_ffi: #LibFFI
.. _mixedmodule: #Mixed Modules
@@ -110,3 +112,34 @@
XXX we should provide detailed docs about lltype and rffi, especially if we
want people to follow that way.
+
+Reflex
+======
+
+This method is only experimental for now, and is being exercised on a branch,
+`reflex-support`_, so you will have to build PyPy yourself.
+The method works by using the `Reflex package`_ to provide reflection
+information of the C++ code, which is then used to automatically generate
+bindings at runtime, which can then be used from python.
+Full details are `available here`_.
+
+.. _`reflex-support`: cppyy.html
+.. _`Reflex package`: http://root.cern.ch/drupal/content/reflex
+.. _`available here`: cppyy.html
+
+Pros
+----
+
+If it works, it is mostly automatic, and hence easy in use.
+The bindings can make use of direct pointers, in which case the calls are
+very fast.
+
+Cons
+----
+
+C++ is a large language, and these bindings are not yet feature-complete.
+Although missing features should do no harm if you don't use them, if you do
+need a particular feature, it may be necessary to work around it in python
+or with a C++ helper function.
+Although Reflex works on various platforms, the bindings with PyPy have only
+been tested on Linux.
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -24,7 +24,8 @@
translation. Failing that, they will pick the most recent Visual Studio
compiler they can find. In addition, the target architecture
(32 bits, 64 bits) is automatically selected. A 32 bit build can only be built
-using a 32 bit Python and vice versa.
+using a 32 bit Python and vice versa. By default pypy is built using the
+Multi-threaded DLL (/MD) runtime environment.
**Note:** PyPy is currently not supported for 64 bit Windows, and translation
will fail in this case.
@@ -102,10 +103,12 @@
Download the source code of expat on sourceforge:
http://sourceforge.net/projects/expat/ and extract it in the base
-directory. Then open the project file ``expat.dsw`` with Visual
+directory. Version 2.1.0 is known to pass tests. Then open the project
+file ``expat.dsw`` with Visual
Studio; follow the instruction for converting the project files,
-switch to the "Release" configuration, and build the solution (the
-``expat`` project is actually enough for pypy).
+switch to the "Release" configuration, reconfigure the runtime for
+Multi-threaded DLL (/MD) and build the solution (the ``expat`` project
+is actually enough for pypy).
Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in
your PATH.
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -304,14 +304,19 @@
# produce compatible pycs.
if (self.space.isinstance_w(w_obj, self.space.w_unicode) and
self.space.isinstance_w(w_const, self.space.w_unicode)):
- unistr = self.space.unicode_w(w_const)
- if len(unistr) == 1:
- ch = ord(unistr[0])
- else:
- ch = 0
- if (ch > 0xFFFF or
- (MAXUNICODE == 0xFFFF and 0xD800 <= ch <= 0xDFFF)):
- return subs
+ #unistr = self.space.unicode_w(w_const)
+ #if len(unistr) == 1:
+ # ch = ord(unistr[0])
+ #else:
+ # ch = 0
+ #if (ch > 0xFFFF or
+ # (MAXUNICODE == 0xFFFF and 0xD800 <= ch <= 0xDFFF)):
+ # --XXX-- for now we always disable optimization of
+ # u'...'[constant] because the tests above are not
+ # enough to fix issue5057 (CPython has the same
+ # problem as of April 24, 2012).
+ # See test_const_fold_unicode_subscr
+ return subs
return ast.Const(w_const, subs.lineno, subs.col_offset)
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -844,7 +844,8 @@
return u"abc"[0]
"""
counts = self.count_instructions(source)
- assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
+ if 0: # xxx later?
+ assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
# getitem outside of the BMP should not be optimized
source = """def f():
@@ -854,12 +855,20 @@
assert counts == {ops.LOAD_CONST: 2, ops.BINARY_SUBSCR: 1,
ops.RETURN_VALUE: 1}
+ source = """def f():
+ return u"\U00012345abcdef"[3]
+ """
+ counts = self.count_instructions(source)
+ assert counts == {ops.LOAD_CONST: 2, ops.BINARY_SUBSCR: 1,
+ ops.RETURN_VALUE: 1}
+
monkeypatch.setattr(optimize, "MAXUNICODE", 0xFFFF)
source = """def f():
return u"\uE01F"[0]
"""
counts = self.count_instructions(source)
- assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
+ if 0: # xxx later?
+ assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
monkeypatch.undo()
# getslice is not yet optimized.
diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py
--- a/pypy/jit/metainterp/heapcache.py
+++ b/pypy/jit/metainterp/heapcache.py
@@ -20,6 +20,7 @@
self.dependencies = {}
# contains frame boxes that are not virtualizables
self.nonstandard_virtualizables = {}
+
# heap cache
# maps descrs to {from_box, to_box} dicts
self.heap_cache = {}
@@ -29,6 +30,26 @@
# cache the length of arrays
self.length_cache = {}
+ # replace_box is called surprisingly often, therefore it's not efficient
+ # to go over all the dicts and fix them.
+ # instead, these two dicts are kept, and a replace_box adds an entry to
+ # each of them.
+ # every time one of the dicts heap_cache, heap_array_cache, length_cache
+ # is accessed, suitable indirections need to be performed
+
+ # this looks all very subtle, but in practice the patterns of
+ # replacements should not be that complex. Usually a box is replaced by
+ # a const, once. Also, if something goes wrong, the effect is that less
+ # caching than possible is done, which is not a huge problem.
+ self.input_indirections = {}
+ self.output_indirections = {}
+
+ def _input_indirection(self, box):
+ return self.input_indirections.get(box, box)
+
+ def _output_indirection(self, box):
+ return self.output_indirections.get(box, box)
+
def invalidate_caches(self, opnum, descr, argboxes):
self.mark_escaped(opnum, argboxes)
self.clear_caches(opnum, descr, argboxes)
@@ -132,14 +153,16 @@
self.arraylen_now_known(box, lengthbox)
def getfield(self, box, descr):
+ box = self._input_indirection(box)
d = self.heap_cache.get(descr, None)
if d:
tobox = d.get(box, None)
- if tobox:
- return tobox
+ return self._output_indirection(tobox)
return None
def getfield_now_known(self, box, descr, fieldbox):
+ box = self._input_indirection(box)
+ fieldbox = self._input_indirection(fieldbox)
self.heap_cache.setdefault(descr, {})[box] = fieldbox
def setfield(self, box, descr, fieldbox):
@@ -148,6 +171,8 @@
self.heap_cache[descr] = new_d
def _do_write_with_aliasing(self, d, box, fieldbox):
+ box = self._input_indirection(box)
+ fieldbox = self._input_indirection(fieldbox)
# slightly subtle logic here
# a write to an arbitrary box, all other boxes can alias this one
if not d or box not in self.new_boxes:
@@ -166,6 +191,7 @@
return new_d
def getarrayitem(self, box, descr, indexbox):
+ box = self._input_indirection(box)
if not isinstance(indexbox, ConstInt):
return
index = indexbox.getint()
@@ -173,9 +199,11 @@
if cache:
indexcache = cache.get(index, None)
if indexcache is not None:
- return indexcache.get(box, None)
+ return self._output_indirection(indexcache.get(box, None))
def getarrayitem_now_known(self, box, descr, indexbox, valuebox):
+ box = self._input_indirection(box)
+ valuebox = self._input_indirection(valuebox)
if not isinstance(indexbox, ConstInt):
return
index = indexbox.getint()
@@ -198,25 +226,13 @@
cache[index] = self._do_write_with_aliasing(indexcache, box, valuebox)
def arraylen(self, box):
- return self.length_cache.get(box, None)
+ box = self._input_indirection(box)
+ return self._output_indirection(self.length_cache.get(box, None))
def arraylen_now_known(self, box, lengthbox):
- self.length_cache[box] = lengthbox
-
- def _replace_box(self, d, oldbox, newbox):
- new_d = {}
- for frombox, tobox in d.iteritems():
- if frombox is oldbox:
- frombox = newbox
- if tobox is oldbox:
- tobox = newbox
- new_d[frombox] = tobox
- return new_d
+ box = self._input_indirection(box)
+ self.length_cache[box] = self._input_indirection(lengthbox)
def replace_box(self, oldbox, newbox):
- for descr, d in self.heap_cache.iteritems():
- self.heap_cache[descr] = self._replace_box(d, oldbox, newbox)
- for descr, d in self.heap_array_cache.iteritems():
- for index, cache in d.iteritems():
- d[index] = self._replace_box(cache, oldbox, newbox)
- self.length_cache = self._replace_box(self.length_cache, oldbox, newbox)
+ self.input_indirections[self._output_indirection(newbox)] = self._input_indirection(oldbox)
+ self.output_indirections[self._input_indirection(oldbox)] = self._output_indirection(newbox)
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -7,7 +7,7 @@
import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt
import pypy.jit.metainterp.optimizeopt.virtualize as virtualize
from pypy.jit.metainterp.optimize import InvalidLoop
-from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt
+from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt, get_const_ptr_for_string
from pypy.jit.metainterp import executor, compile, resume, history
from pypy.jit.metainterp.resoperation import rop, opname, ResOperation
from pypy.rlib.rarithmetic import LONG_BIT
@@ -5067,6 +5067,25 @@
"""
self.optimize_strunicode_loop(ops, expected)
+ def test_call_pure_vstring_const(self):
+ ops = """
+ []
+ p0 = newstr(3)
+ strsetitem(p0, 0, 97)
+ strsetitem(p0, 1, 98)
+ strsetitem(p0, 2, 99)
+ i0 = call_pure(123, p0, descr=nonwritedescr)
+ finish(i0)
+ """
+ expected = """
+ []
+ finish(5)
+ """
+ call_pure_results = {
+ (ConstInt(123), get_const_ptr_for_string("abc"),): ConstInt(5),
+ }
+ self.optimize_loop(ops, expected, call_pure_results)
+
class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
pass
diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py
--- a/pypy/jit/metainterp/test/test_heapcache.py
+++ b/pypy/jit/metainterp/test/test_heapcache.py
@@ -2,12 +2,14 @@
from pypy.jit.metainterp.resoperation import rop
from pypy.jit.metainterp.history import ConstInt
-box1 = object()
-box2 = object()
-box3 = object()
-box4 = object()
+box1 = "box1"
+box2 = "box2"
+box3 = "box3"
+box4 = "box4"
+box5 = "box5"
lengthbox1 = object()
lengthbox2 = object()
+lengthbox3 = object()
descr1 = object()
descr2 = object()
descr3 = object()
@@ -276,11 +278,43 @@
h.setfield(box1, descr2, box3)
h.setfield(box2, descr3, box3)
h.replace_box(box1, box4)
- assert h.getfield(box1, descr1) is None
- assert h.getfield(box1, descr2) is None
assert h.getfield(box4, descr1) is box2
assert h.getfield(box4, descr2) is box3
assert h.getfield(box2, descr3) is box3
+ h.setfield(box4, descr1, box3)
+ assert h.getfield(box4, descr1) is box3
+
+ h = HeapCache()
+ h.setfield(box1, descr1, box2)
+ h.setfield(box1, descr2, box3)
+ h.setfield(box2, descr3, box3)
+ h.replace_box(box3, box4)
+ assert h.getfield(box1, descr1) is box2
+ assert h.getfield(box1, descr2) is box4
+ assert h.getfield(box2, descr3) is box4
+
+ def test_replace_box_twice(self):
+ h = HeapCache()
+ h.setfield(box1, descr1, box2)
+ h.setfield(box1, descr2, box3)
+ h.setfield(box2, descr3, box3)
+ h.replace_box(box1, box4)
+ h.replace_box(box4, box5)
+ assert h.getfield(box5, descr1) is box2
+ assert h.getfield(box5, descr2) is box3
+ assert h.getfield(box2, descr3) is box3
+ h.setfield(box5, descr1, box3)
+ assert h.getfield(box4, descr1) is box3
+
+ h = HeapCache()
+ h.setfield(box1, descr1, box2)
+ h.setfield(box1, descr2, box3)
+ h.setfield(box2, descr3, box3)
+ h.replace_box(box3, box4)
+ h.replace_box(box4, box5)
+ assert h.getfield(box1, descr1) is box2
+ assert h.getfield(box1, descr2) is box5
+ assert h.getfield(box2, descr3) is box5
def test_replace_box_array(self):
h = HeapCache()
@@ -291,9 +325,6 @@
h.setarrayitem(box3, descr2, index2, box1)
h.setarrayitem(box2, descr3, index2, box3)
h.replace_box(box1, box4)
- assert h.getarrayitem(box1, descr1, index1) is None
- assert h.getarrayitem(box1, descr2, index1) is None
- assert h.arraylen(box1) is None
assert h.arraylen(box4) is lengthbox1
assert h.getarrayitem(box4, descr1, index1) is box2
assert h.getarrayitem(box4, descr2, index1) is box3
@@ -304,6 +335,27 @@
h.replace_box(lengthbox1, lengthbox2)
assert h.arraylen(box4) is lengthbox2
+ def test_replace_box_array_twice(self):
+ h = HeapCache()
+ h.setarrayitem(box1, descr1, index1, box2)
+ h.setarrayitem(box1, descr2, index1, box3)
+ h.arraylen_now_known(box1, lengthbox1)
+ h.setarrayitem(box2, descr1, index2, box1)
+ h.setarrayitem(box3, descr2, index2, box1)
+ h.setarrayitem(box2, descr3, index2, box3)
+ h.replace_box(box1, box4)
+ h.replace_box(box4, box5)
+ assert h.arraylen(box4) is lengthbox1
+ assert h.getarrayitem(box5, descr1, index1) is box2
+ assert h.getarrayitem(box5, descr2, index1) is box3
+ assert h.getarrayitem(box2, descr1, index2) is box5
+ assert h.getarrayitem(box3, descr2, index2) is box5
+ assert h.getarrayitem(box2, descr3, index2) is box3
+
+ h.replace_box(lengthbox1, lengthbox2)
+ h.replace_box(lengthbox2, lengthbox3)
+ assert h.arraylen(box4) is lengthbox3
+
def test_ll_arraycopy(self):
h = HeapCache()
h.new_array(box1, lengthbox1)
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -103,8 +103,8 @@
""".split()
for name in constant_names:
setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name))
-udir.join('pypy_decl.h').write("/* Will be filled later */")
-udir.join('pypy_macros.h').write("/* Will be filled later */")
+udir.join('pypy_decl.h').write("/* Will be filled later */\n")
+udir.join('pypy_macros.h').write("/* Will be filled later */\n")
globals().update(rffi_platform.configure(CConfig_constants))
def copy_header_files(dstdir):
diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
--- a/pypy/module/cpyext/include/object.h
+++ b/pypy/module/cpyext/include/object.h
@@ -38,10 +38,19 @@
PyObject_VAR_HEAD
} PyVarObject;
+#ifndef PYPY_DEBUG_REFCOUNT
#define Py_INCREF(ob) (Py_IncRef((PyObject *)ob))
#define Py_DECREF(ob) (Py_DecRef((PyObject *)ob))
#define Py_XINCREF(ob) (Py_IncRef((PyObject *)ob))
#define Py_XDECREF(ob) (Py_DecRef((PyObject *)ob))
+#else
+#define Py_INCREF(ob) (((PyObject *)ob)->ob_refcnt++)
+#define Py_DECREF(ob) ((((PyObject *)ob)->ob_refcnt > 1) ? \
+ ((PyObject *)ob)->ob_refcnt-- : (Py_DecRef((PyObject *)ob)))
+
+#define Py_XINCREF(op) do { if ((op) == NULL) ; else Py_INCREF(op); } while (0)
+#define Py_XDECREF(op) do { if ((op) == NULL) ; else Py_DECREF(op); } while (0)
+#endif
#define Py_CLEAR(op) \
do { \
diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py
--- a/pypy/module/cpyext/listobject.py
+++ b/pypy/module/cpyext/listobject.py
@@ -110,6 +110,16 @@
space.call_method(w_list, "reverse")
return 0
+ at cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject)
+def PyList_GetSlice(space, w_list, low, high):
+ """Return a list of the objects in list containing the objects between low
+ and high. Return NULL and set an exception if unsuccessful. Analogous
+ to list[low:high]. Negative indices, as when slicing from Python, are not
+ supported."""
+ w_start = space.wrap(low)
+ w_stop = space.wrap(high)
+ return space.getslice(w_list, w_start, w_stop)
+
@cpython_api([PyObject, Py_ssize_t, Py_ssize_t, PyObject], rffi.INT_real, error=-1)
def PyList_SetSlice(space, w_list, low, high, w_sequence):
"""Set the slice of list between low and high to the contents of
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -381,6 +381,15 @@
This is the equivalent of the Python expression hash(o)."""
return space.int_w(space.hash(w_obj))
+ at cpython_api([PyObject], lltype.Signed, error=-1)
+def PyObject_HashNotImplemented(space, o):
+ """Set a TypeError indicating that type(o) is not hashable and return -1.
+ This function receives special treatment when stored in a tp_hash slot,
+ allowing a type to explicitly indicate to the interpreter that it is not
+ hashable.
+ """
+ raise OperationError(space.w_TypeError, space.wrap("unhashable type"))
+
@cpython_api([PyObject], PyObject)
def PyObject_Dir(space, w_o):
"""This is equivalent to the Python expression dir(o), returning a (possibly
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -7,7 +7,7 @@
cpython_api, generic_cpy_call, PyObject, Py_ssize_t)
from pypy.module.cpyext.typeobjectdefs import (
unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc,
- getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc,
+ getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry,
ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc,
cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc,
readbufferproc)
@@ -60,6 +60,16 @@
args_w = space.fixedview(w_args)
return generic_cpy_call(space, func_binary, w_self, args_w[0])
+def wrap_inquirypred(space, w_self, w_args, func):
+ func_inquiry = rffi.cast(inquiry, func)
+ check_num_args(space, w_args, 0)
+ args_w = space.fixedview(w_args)
+ res = generic_cpy_call(space, func_inquiry, w_self)
+ res = rffi.cast(lltype.Signed, res)
+ if res == -1:
+ space.fromcache(State).check_and_raise_exception()
+ return space.wrap(bool(res))
+
def wrap_getattr(space, w_self, w_args, func):
func_target = rffi.cast(getattrfunc, func)
check_num_args(space, w_args, 1)
diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py
--- a/pypy/module/cpyext/stringobject.py
+++ b/pypy/module/cpyext/stringobject.py
@@ -294,6 +294,26 @@
w_errors = space.wrap(rffi.charp2str(errors))
return space.call_method(w_str, 'encode', w_encoding, w_errors)
+ at cpython_api([PyObject, rffi.CCHARP, rffi.CCHARP], PyObject)
+def PyString_AsDecodedObject(space, w_str, encoding, errors):
+ """Decode a string object by passing it to the codec registered
+ for encoding and return the result as Python object. encoding and
+ errors have the same meaning as the parameters of the same name in
+ the string encode() method. The codec to be used is looked up
+ using the Python codec registry. Return NULL if an exception was
+ raised by the codec.
+
+ This function is not available in 3.x and does not have a PyBytes alias."""
+ if not PyString_Check(space, w_str):
+ PyErr_BadArgument(space)
+
+ w_encoding = w_errors = space.w_None
+ if encoding:
+ w_encoding = space.wrap(rffi.charp2str(encoding))
+ if errors:
+ w_errors = space.wrap(rffi.charp2str(errors))
+ return space.call_method(w_str, "decode", w_encoding, w_errors)
+
@cpython_api([PyObject, PyObject], PyObject)
def _PyString_Join(space, w_sep, w_seq):
return space.call_method(w_sep, 'join', w_seq)
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -1405,17 +1405,6 @@
"""
raise NotImplementedError
- at cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject)
-def PyList_GetSlice(space, list, low, high):
- """Return a list of the objects in list containing the objects between low
- and high. Return NULL and set an exception if unsuccessful. Analogous
- to list[low:high]. Negative indices, as when slicing from Python, are not
- supported.
-
- This function used an int for low and high. This might
- require changes in your code for properly supporting 64-bit systems."""
- raise NotImplementedError
-
@cpython_api([Py_ssize_t], PyObject)
def PyLong_FromSsize_t(space, v):
"""Return a new PyLongObject object from a C Py_ssize_t, or
@@ -1606,15 +1595,6 @@
for PyObject_Str()."""
raise NotImplementedError
- at cpython_api([PyObject], lltype.Signed, error=-1)
-def PyObject_HashNotImplemented(space, o):
- """Set a TypeError indicating that type(o) is not hashable and return -1.
- This function receives special treatment when stored in a tp_hash slot,
- allowing a type to explicitly indicate to the interpreter that it is not
- hashable.
- """
- raise NotImplementedError
-
@cpython_api([], PyFrameObject)
def PyEval_GetFrame(space):
"""Return the current thread state's frame, which is NULL if no frame is
@@ -1737,17 +1717,6 @@
changes in your code for properly supporting 64-bit systems."""
raise NotImplementedError
- at cpython_api([PyObject, rffi.CCHARP, rffi.CCHARP], PyObject)
-def PyString_AsDecodedObject(space, str, encoding, errors):
- """Decode a string object by passing it to the codec registered for encoding and
- return the result as Python object. encoding and errors have the same
- meaning as the parameters of the same name in the string encode() method.
- The codec to be used is looked up using the Python codec registry. Return NULL
- if an exception was raised by the codec.
-
- This function is not available in 3.x and does not have a PyBytes alias."""
- raise NotImplementedError
-
@cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.CCHARP], PyObject)
def PyString_Encode(space, s, size, encoding, errors):
"""Encode the char buffer of the given size by passing it to the codec
diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py
--- a/pypy/module/cpyext/test/test_listobject.py
+++ b/pypy/module/cpyext/test/test_listobject.py
@@ -58,6 +58,11 @@
w_t = api.PyList_AsTuple(w_l)
assert space.unwrap(w_t) == (3, 2, 1)
+ def test_list_getslice(self, space, api):
+ w_l = space.newlist([space.wrap(3), space.wrap(2), space.wrap(1)])
+ w_s = api.PyList_GetSlice(w_l, 1, 5)
+ assert space.unwrap(w_s) == [2, 1]
+
class AppTestListObject(AppTestCpythonExtensionBase):
def test_listobject(self):
import sys
diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py
--- a/pypy/module/cpyext/test/test_stringobject.py
+++ b/pypy/module/cpyext/test/test_stringobject.py
@@ -307,6 +307,13 @@
space.wrap(2), lltype.nullptr(rffi.CCHARP.TO), lltype.nullptr(rffi.CCHARP.TO)
)
+ def test_AsDecodedObject(self, space, api):
+ w_str = space.wrap('caf\xe9')
+ encoding = rffi.str2charp("latin-1")
+ w_res = api.PyString_AsDecodedObject(w_str, encoding, None)
+ rffi.free_charp(encoding)
+ assert space.unwrap(w_res) == u"caf\xe9"
+
def test_eq(self, space, api):
assert 1 == api._PyString_Eq(space.wrap("hello"), space.wrap("hello"))
assert 0 == api._PyString_Eq(space.wrap("hello"), space.wrap("world"))
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -488,3 +488,55 @@
assert type(it) is type(iter([]))
assert module.tp_iternext(it) == 1
raises(StopIteration, module.tp_iternext, it)
+
+ def test_bool(self):
+ module = self.import_extension('foo', [
+ ("newInt", "METH_VARARGS",
+ """
+ IntLikeObject *intObj;
+ long intval;
+ PyObject *name;
+
+ if (!PyArg_ParseTuple(args, "i", &intval))
+ return NULL;
+
+ IntLike_Type.tp_as_number = &intlike_as_number;
+ intlike_as_number.nb_nonzero = intlike_nb_nonzero;
+ if (PyType_Ready(&IntLike_Type) < 0) return NULL;
+ intObj = PyObject_New(IntLikeObject, &IntLike_Type);
+ if (!intObj) {
+ return NULL;
+ }
+
+ intObj->value = intval;
+ return (PyObject *)intObj;
+ """)],
+ """
+ typedef struct
+ {
+ PyObject_HEAD
+ int value;
+ } IntLikeObject;
+
+ static int
+ intlike_nb_nonzero(IntLikeObject *v)
+ {
+ if (v->value == -42) {
+ PyErr_SetNone(PyExc_ValueError);
+ return -1;
+ }
+ return v->value;
+ }
+
+ PyTypeObject IntLike_Type = {
+ PyObject_HEAD_INIT(0)
+ /*ob_size*/ 0,
+ /*tp_name*/ "IntLike",
+ /*tp_basicsize*/ sizeof(IntLikeObject),
+ };
+ static PyNumberMethods intlike_as_number;
+ """)
+ assert not bool(module.newInt(0))
+ assert bool(module.newInt(1))
+ assert bool(module.newInt(-1))
+ raises(ValueError, bool, module.newInt(-42))
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -5,6 +5,7 @@
interpleveldefs = {
'debug_repr': 'interp_extras.debug_repr',
'remove_invalidates': 'interp_extras.remove_invalidates',
+ 'set_invalidation': 'interp_extras.set_invalidation',
}
appleveldefs = {}
diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -10,6 +10,7 @@
from pypy.module.micronumpy.interp_dtype import get_dtype_cache
from pypy.module.micronumpy.interp_numarray import (Scalar, BaseArray,
scalar_w, W_NDimArray, array)
+from pypy.module.micronumpy.interp_arrayops import where
from pypy.module.micronumpy import interp_ufuncs
from pypy.rlib.objectmodel import specialize, instantiate
@@ -35,6 +36,7 @@
SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any",
"unegative", "flat", "tostring"]
TWO_ARG_FUNCTIONS = ["dot", 'take']
+THREE_ARG_FUNCTIONS = ['where']
class FakeSpace(object):
w_ValueError = None
@@ -445,14 +447,25 @@
arg = self.args[1].execute(interp)
if not isinstance(arg, BaseArray):
raise ArgumentNotAnArray
- if not isinstance(arg, BaseArray):
- raise ArgumentNotAnArray
if self.name == "dot":
w_res = arr.descr_dot(interp.space, arg)
elif self.name == 'take':
w_res = arr.descr_take(interp.space, arg)
else:
assert False # unreachable code
+ elif self.name in THREE_ARG_FUNCTIONS:
+ if len(self.args) != 3:
+ raise ArgumentMismatch
+ arg1 = self.args[1].execute(interp)
+ arg2 = self.args[2].execute(interp)
+ if not isinstance(arg1, BaseArray):
+ raise ArgumentNotAnArray
+ if not isinstance(arg2, BaseArray):
+ raise ArgumentNotAnArray
+ if self.name == "where":
+ w_res = where(interp.space, arr, arg1, arg2)
+ else:
+ assert False
else:
raise WrongFunctionName
if isinstance(w_res, BaseArray):
diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py
--- a/pypy/module/micronumpy/interp_arrayops.py
+++ b/pypy/module/micronumpy/interp_arrayops.py
@@ -4,7 +4,7 @@
from pypy.module.micronumpy import signature
class WhereArray(VirtualArray):
- def __init__(self, arr, x, y):
+ def __init__(self, space, arr, x, y):
self.arr = arr
self.x = x
self.y = y
@@ -87,4 +87,4 @@
arr = convert_to_array(space, w_arr)
x = convert_to_array(space, w_x)
y = convert_to_array(space, w_y)
- return WhereArray(arr, x, y)
+ return WhereArray(space, arr, x, y)
diff --git a/pypy/module/micronumpy/interp_extras.py b/pypy/module/micronumpy/interp_extras.py
--- a/pypy/module/micronumpy/interp_extras.py
+++ b/pypy/module/micronumpy/interp_extras.py
@@ -1,5 +1,5 @@
from pypy.interpreter.gateway import unwrap_spec
-from pypy.module.micronumpy.interp_numarray import BaseArray
+from pypy.module.micronumpy.interp_numarray import BaseArray, get_numarray_cache
@unwrap_spec(array=BaseArray)
@@ -13,3 +13,7 @@
"""
del array.invalidates[:]
return space.w_None
+
+ at unwrap_spec(arg=bool)
+def set_invalidation(space, arg):
+ get_numarray_cache(space).enable_invalidation = arg
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -72,9 +72,10 @@
arr.force_if_needed()
del self.invalidates[:]
- def add_invalidates(self, other):
- self.invalidates.append(other)
-
+ def add_invalidates(self, space, other):
+ if get_numarray_cache(space).enable_invalidation:
+ self.invalidates.append(other)
+
def descr__new__(space, w_subtype, w_size, w_dtype=None):
dtype = space.interp_w(interp_dtype.W_Dtype,
space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)
@@ -1583,3 +1584,10 @@
arr.fill(space, space.wrap(False))
return arr
return space.wrap(False)
+
+class NumArrayCache(object):
+ def __init__(self, space):
+ self.enable_invalidation = True
+
+def get_numarray_cache(space):
+ return space.fromcache(NumArrayCache)
diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
--- a/pypy/module/micronumpy/interp_ufuncs.py
+++ b/pypy/module/micronumpy/interp_ufuncs.py
@@ -278,7 +278,7 @@
else:
w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype,
res_dtype, w_obj)
- w_obj.add_invalidates(w_res)
+ w_obj.add_invalidates(space, w_res)
return w_res
@@ -347,8 +347,8 @@
w_res = Call2(self.func, self.name,
new_shape, calc_dtype,
res_dtype, w_lhs, w_rhs, out)
- w_lhs.add_invalidates(w_res)
- w_rhs.add_invalidates(w_res)
+ w_lhs.add_invalidates(space, w_res)
+ w_rhs.add_invalidates(space, w_res)
if out:
w_res.get_concrete()
return w_res
diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py
--- a/pypy/module/micronumpy/test/test_arrayops.py
+++ b/pypy/module/micronumpy/test/test_arrayops.py
@@ -7,3 +7,10 @@
a = [1, 2, 3, 0, -3]
a = where(array(a) > 0, ones(5), zeros(5))
assert (a == [1, 1, 1, 0, 0]).all()
+
+ def test_where_invalidates(self):
+ from _numpypy import where, ones, zeros, array
+ a = array([1, 2, 3, 0, -3])
+ b = where(a > 0, ones(5), zeros(5))
+ a[0] = 0
+ assert (b == [1, 1, 1, 0, 0]).all()
diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py
--- a/pypy/module/micronumpy/test/test_compile.py
+++ b/pypy/module/micronumpy/test/test_compile.py
@@ -270,3 +270,13 @@
b -> 2
""")
assert interp.results[0].value == 3
+
+ def test_where(self):
+ interp = self.run('''
+ a = [1, 0, 3, 0]
+ b = [1, 1, 1, 1]
+ c = [0, 0, 0, 0]
+ d = where(a, b, c)
+ d -> 1
+ ''')
+ assert interp.results[0].value == 0
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
--- a/pypy/module/micronumpy/test/test_numarray.py
+++ b/pypy/module/micronumpy/test/test_numarray.py
@@ -1830,6 +1830,19 @@
a[a & 1 == 1] = array([8, 9, 10])
assert (a == [[0, 8], [2, 9], [4, 10]]).all()
+ def test_array_indexing_bool_setitem_multidim(self):
+ from _numpypy import arange
+ a = arange(10).reshape(5, 2)
+ a[a & 1 == 0] = 15
+ assert (a == [[15, 1], [15, 3], [15, 5], [15, 7], [15, 9]]).all()
+
+ def test_array_indexing_bool_setitem_2(self):
+ from _numpypy import arange
+ a = arange(10).reshape(5, 2)
+ a = a[::2]
+ a[a & 1 == 0] = 15
+ assert (a == [[15, 1], [15, 5], [15, 9]]).all()
+
def test_copy_kwarg(self):
from _numpypy import array
x = array([1, 2, 3])
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -1,7 +1,6 @@
from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
-
class AppTestUfuncs(BaseNumpyAppTest):
def test_ufunc_instance(self):
from _numpypy import add, ufunc
@@ -149,7 +148,11 @@
assert math.isnan(fmax(0, nan))
assert math.isnan(fmax(nan, nan))
# The numpy docs specify that the FIRST NaN should be used if both are NaN
- assert math.copysign(1.0, fmax(nnan, nan)) == -1.0
+ # Since comparisons with nnan and nan all return false,
+ # use copysign on both sides to sidestep bug in nan representation
+ # on Microsoft win32
+ assert math.copysign(1., fmax(nnan, nan)) == math.copysign(1., nnan)
+
def test_fmin(self):
from _numpypy import fmin
@@ -165,7 +168,9 @@
assert math.isnan(fmin(0, nan))
assert math.isnan(fmin(nan, nan))
# The numpy docs specify that the FIRST NaN should be used if both are NaN
- assert math.copysign(1.0, fmin(nnan, nan)) == -1.0
+ # use copysign on both sides to sidestep bug in nan representation
+ # on Microsoft win32
+ assert math.copysign(1., fmin(nnan, nan)) == math.copysign(1., nnan)
def test_fmod(self):
from _numpypy import fmod
diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py
--- a/pypy/module/mmap/test/test_mmap.py
+++ b/pypy/module/mmap/test/test_mmap.py
@@ -596,7 +596,7 @@
import sys
size = 0x14FFFFFFF
if sys.platform.startswith('win') or sys.platform == 'darwin':
- self.skip('test requires %s bytes and a long time to run' % size)
+ skip('test requires %s bytes and a long time to run' % size)
with open(self.tmpname, "w+b") as f:
f.seek(size)
@@ -618,7 +618,7 @@
import sys
size = 0x17FFFFFFF
if sys.platform.startswith('win') or sys.platform == 'darwin':
- self.skip('test requires %s bytes and a long time to run' % size)
+ skip('test requires %s bytes and a long time to run' % size)
with open(self.tmpname, "w+b") as f:
f.seek(size)
diff --git a/pypy/pytest.ini b/pypy/pytest.ini
--- a/pypy/pytest.ini
+++ b/pypy/pytest.ini
@@ -1,2 +1,2 @@
[pytest]
-addopts = --assert=plain -rf
+addopts = --assert=reinterp -rf
diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py
--- a/pypy/rlib/rbigint.py
+++ b/pypy/rlib/rbigint.py
@@ -40,7 +40,7 @@
# In that case, do 5 bits at a time. The potential drawback is that
# a table of 2**5 intermediate results is computed.
-## FIVEARY_CUTOFF = 8 disabled for now
+FIVEARY_CUTOFF = 8
def _mask_digit(x):
@@ -456,7 +456,7 @@
# python adaptation: moved macros REDUCE(X) and MULT(X, Y, result)
# into helper function result = _help_mult(x, y, c)
- if 1: ## b.numdigits() <= FIVEARY_CUTOFF:
+ if b.numdigits() <= FIVEARY_CUTOFF:
# Left-to-right binary exponentiation (HAC Algorithm 14.79)
# http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
i = b.numdigits() - 1
@@ -469,30 +469,51 @@
z = _help_mult(z, a, c)
j >>= 1
i -= 1
-## else:
-## This code is disabled for now, because it assumes that
-## SHIFT is a multiple of 5. It could be fixed but it looks
-## like it's more troubles than benefits...
-##
-## # Left-to-right 5-ary exponentiation (HAC Algorithm 14.82)
-## # This is only useful in the case where c != None.
-## # z still holds 1L
-## table = [z] * 32
-## table[0] = z
-## for i in range(1, 32):
-## table[i] = _help_mult(table[i-1], a, c)
-## i = b.numdigits() - 1
-## while i >= 0:
-## bi = b.digit(i)
-## j = SHIFT - 5
-## while j >= 0:
-## index = (bi >> j) & 0x1f
-## for k in range(5):
-## z = _help_mult(z, z, c)
-## if index:
-## z = _help_mult(z, table[index], c)
-## j -= 5
-## i -= 1
+ else:
+ # Left-to-right 5-ary exponentiation (HAC Algorithm 14.82)
+ # This is only useful in the case where c != None.
+ # z still holds 1L
+ table = [z] * 32
+ table[0] = z
+ for i in range(1, 32):
+ table[i] = _help_mult(table[i-1], a, c)
+ i = b.numdigits()
+ # Note that here SHIFT is not a multiple of 5. The difficulty
+ # is to extract 5 bits at a time from 'b', starting from the
+ # most significant digits, so that at the end of the algorithm
+ # it falls exactly to zero.
+ # m = max number of bits = i * SHIFT
+ # m+ = m rounded up to the next multiple of 5
+ # j = (m+) % SHIFT = (m+) - (i * SHIFT)
+ # (computed without doing "i * SHIFT", which might overflow)
+ j = i % 5
+ if j != 0:
+ j = 5 - j
+ if not we_are_translated():
+ assert j == (i*SHIFT+4)//5*5 - i*SHIFT
+ #
+ accum = r_uint(0)
+ while True:
+ j -= 5
+ if j >= 0:
+ index = (accum >> j) & 0x1f
+ else:
+ # 'accum' does not have enough digits.
+ # must get the next digit from 'b' in order to complete
+ i -= 1
+ if i < 0:
+ break # done
+ bi = b.udigit(i)
+ index = ((accum << (-j)) | (bi >> (j+SHIFT))) & 0x1f
+ accum = bi
+ j += SHIFT
+ #
+ for k in range(5):
+ z = _help_mult(z, z, c)
+ if index:
+ z = _help_mult(z, table[index], c)
+ #
+ assert j == -5
if negativeOutput and z.sign != 0:
z = z.sub(c)
diff --git a/pypy/rlib/runicode.py b/pypy/rlib/runicode.py
--- a/pypy/rlib/runicode.py
+++ b/pypy/rlib/runicode.py
@@ -1234,7 +1234,7 @@
pos += 1
continue
- if 0xD800 <= oc < 0xDC00 and pos + 1 < size:
+ if MAXUNICODE < 65536 and 0xD800 <= oc < 0xDC00 and pos + 1 < size:
# Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes
pos += 1
oc2 = ord(s[pos])
@@ -1350,6 +1350,20 @@
pos = 0
while pos < size:
oc = ord(s[pos])
+
+ if MAXUNICODE < 65536 and 0xD800 <= oc < 0xDC00 and pos + 1 < size:
+ # Map UTF-16 surrogate pairs to Unicode \UXXXXXXXX escapes
+ pos += 1
+ oc2 = ord(s[pos])
+
+ if 0xDC00 <= oc2 <= 0xDFFF:
+ ucs = (((oc & 0x03FF) << 10) | (oc2 & 0x03FF)) + 0x00010000
+ raw_unicode_escape_helper(result, ucs)
+ pos += 1
+ continue
+ # Fall through: isolated surrogates are copied as-is
+ pos -= 1
+
if oc < 0x100:
result.append(chr(oc))
else:
diff --git a/pypy/rlib/test/test_rbigint.py b/pypy/rlib/test/test_rbigint.py
--- a/pypy/rlib/test/test_rbigint.py
+++ b/pypy/rlib/test/test_rbigint.py
@@ -379,6 +379,18 @@
for n, expected in [(37, 9), (1291, 931), (67889, 39464)]:
v = two.pow(t, rbigint.fromint(n))
assert v.toint() == expected
+ #
+ # more tests, comparing against CPython's answer
+ enabled = sample(range(5*32), 10)
+ for i in range(5*32):
+ t = t.mul(two) # add one random bit
+ if random() >= 0.5:
+ t = t.add(rbigint.fromint(1))
+ if i not in enabled:
+ continue # don't take forever
+ n = randint(1, sys.maxint)
+ v = two.pow(t, rbigint.fromint(n))
+ assert v.toint() == pow(2, t.tolong(), n)
def test_pow_lln(self):
x = 10L
diff --git a/pypy/rlib/test/test_runicode.py b/pypy/rlib/test/test_runicode.py
--- a/pypy/rlib/test/test_runicode.py
+++ b/pypy/rlib/test/test_runicode.py
@@ -728,3 +728,18 @@
res = interpret(f, [0x10140])
assert res == 0x10140
+
+ def test_encode_surrogate_pair(self):
+ u = runicode.UNICHR(0xD800) + runicode.UNICHR(0xDC00)
+ if runicode.MAXUNICODE < 65536:
+ # Narrow unicode build, consider utf16 surrogate pairs
+ assert runicode.unicode_encode_unicode_escape(
+ u, len(u), True) == r'\U00010000'
+ assert runicode.unicode_encode_raw_unicode_escape(
+ u, len(u), True) == r'\U00010000'
+ else:
+ # Wide unicode build, don't merge utf16 surrogate pairs
+ assert runicode.unicode_encode_unicode_escape(
+ u, len(u), True) == r'\ud800\udc00'
+ assert runicode.unicode_encode_raw_unicode_escape(
+ u, len(u), True) == r'\ud800\udc00'
diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py
--- a/pypy/rpython/lltypesystem/lltype.py
+++ b/pypy/rpython/lltypesystem/lltype.py
@@ -1180,7 +1180,7 @@
try:
return self._lookup_adtmeth(field_name)
except AttributeError:
- raise AttributeError("%r instance has no field %r" % (self._T._name,
+ raise AttributeError("%r instance has no field %r" % (self._T,
field_name))
def __setattr__(self, field_name, val):
diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py
--- a/pypy/rpython/tool/rffi_platform.py
+++ b/pypy/rpython/tool/rffi_platform.py
@@ -379,7 +379,7 @@
self.name = name
def prepare_code(self):
- yield 'if ((%s) < 0) {' % (self.name,)
+ yield 'if ((%s) <= 0) {' % (self.name,)
yield ' long long x = (long long)(%s);' % (self.name,)
yield ' printf("value: %lld\\n", x);'
yield '} else {'
@@ -401,7 +401,7 @@
def prepare_code(self):
yield '#ifdef %s' % self.macro
yield 'dump("defined", 1);'
- yield 'if ((%s) < 0) {' % (self.macro,)
+ yield 'if ((%s) <= 0) {' % (self.macro,)
yield ' long long x = (long long)(%s);' % (self.macro,)
yield ' printf("value: %lld\\n", x);'
yield '} else {'
diff --git a/pypy/tool/compare_last_builds.py b/pypy/tool/compare_last_builds.py
new file mode 100644
--- /dev/null
+++ b/pypy/tool/compare_last_builds.py
@@ -0,0 +1,122 @@
+import os
+import urllib2
+import json
+import sys
+import md5
+
+wanted = sys.argv[1:]
+if not wanted:
+ wanted = ['default']
+base = "http://buildbot.pypy.org/json/builders/"
+
+cachedir = os.environ.get('PYPY_BUILDS_CACHE')
+if cachedir and not os.path.exists(cachedir):
+ os.makedirs(cachedir)
+
+
+
+# Fetch `url` via get_data() and decode the body as JSON.
+def get_json(url, cache=cachedir):
+ return json.loads(get_data(url, cache))
+
+
+# Fetch `url` and return the raw response body.  When caching is enabled
+# (truthy `cache`), responses are memoized on disk keyed by the md5 of the
+# URL, so repeated runs do not hit the buildbot again.
+def get_data(url, cache=cachedir):
+ url = str(url)
+ if cache:
+ digest = md5.md5()
+ digest.update(url)
+ digest = digest.hexdigest()
+ # NOTE(review): joins the global `cachedir`, not the `cache` argument;
+ # only safe while callers pass cachedir itself or a falsy value -- confirm.
+ cachepath = os.path.join(cachedir, digest)
+ if os.path.exists(cachepath):
+ with open(cachepath) as fp:
+ return fp.read()
+
+ print 'GET', url
+ fp = urllib2.urlopen(url)
+ try:
+ data = fp.read()
+ if cache:
+ # Write-through: store the fresh response for the next run.
+ with open(cachepath, 'wb') as cp:
+ cp.write(data)
+ return data
+ finally:
+ fp.close()
+
+# Extract the per-test result lines from a pytest log: keep only lines whose
+# first character is non-whitespace and second character is whitespace
+# (presumably the "<status> <testname>" outcome lines -- TODO confirm format).
+def parse_log(log):
+ items = []
+ for v in log.splitlines(1):
+ if not v[0].isspace() and v[1].isspace():
+ items.append(v)
+ return sorted(items) # sort because testrunner order is non-deterministic
+
+# Replace build['log'] (a URL) with the parsed list of test-result lines:
+# download the log as text, strip the buildbot HTML span/pre markup, parse.
+def gather_logdata(build):
+ logdata = get_data(str(build['log']) + '?as_text=1')
+ logdata = logdata.replace('</span><span class="stdout">', '')
+ logdata = logdata.replace('</span></pre>', '')
+ del build['log']
+ build['log'] = parse_log(logdata)
+
+
+# Group builds by branch, keeping only the most recent `keep` builds per
+# branch that produced a non-empty parsed log.  `keep` shrinks as more
+# branches are requested on the command line (3 - len(wanted)).
+def branch_mapping(l):
+ keep = 3 - len(wanted)
+ d = {}
+ # Iterate newest-first; insert(0, ...) restores chronological order.
+ for x in reversed(l):
+ gather_logdata(x)
+ if not x['log']:
+ continue
+ b = x['branch']
+ if b not in d:
+ d[b] = []
+ d[b].insert(0, x)
+ if len(d[b]) > keep:
+ d[b].pop()
+ return d
+
+# Strip a buildbot build dict down to what the comparison needs: drop the
+# bulky metadata keys, lift the pytest log URL out of 'logs', and lift the
+# branch name (defaulting to 'default') out of 'properties'.
+def cleanup_build(d):
+ for a in 'times eta steps slave reason sourceStamp blame currentStep text'.split():
+ del d[a]
+
+ props = d.pop(u'logs')
+ for name, val in props:
+ if name == u'pytestLog':
+ d['log'] = val
+ props = d.pop(u'properties')
+ for name, val, _ in props:
+ if name == u'branch':
+ d['branch'] = val or 'default'
+ return d
+
+# Given one builder's JSON dict, fetch and clean every cached build, keep
+# those on a wanted branch that have a pytest log, and return the per-branch
+# selection produced by branch_mapping() as a flat list.
+def collect_builds(d):
+ name = str(d['basedir'])
+ builds = d['cachedBuilds']
+ l = []
+ for build in builds:
+ # NOTE: rebinds `d` to each build dict; the builder dict is no
+ # longer needed past this point.
+ d = get_json(base + '%s/builds/%s' % (name, build))
+ cleanup_build(d)
+ l.append(d)
+
+ l = [x for x in l if x['branch'] in wanted and 'log' in x]
+ d = branch_mapping(l)
+ return [x for lst in d.values() for x in lst]
+
+
+# Select the linux32 builder entry.  NOTE(review): unused -- the script
+# below indexes 'own-linux-x86-32' directly instead of calling this.
+def only_linux32(d):
+ return d['own-linux-x86-32']
+
+
+# Main script: fetch the builder index (never cached, so the build list is
+# fresh), collect/clean the builds, then diff their test-result sets.
+own_builds = get_json(base, cache=False)['own-linux-x86-32']
+
+builds = collect_builds(own_builds)
+
+
+# Order by requested-branch position, then build number, so the diff below
+# compares wanted[0] against the later branches.
+builds.sort(key=lambda x: (wanted.index(x['branch']), x['number']))
+logs = [x.pop('log') for x in builds]
+for b, s in zip(builds, logs):
+ b['resultset'] = len(s)
+import pprint
+pprint.pprint(builds)
+
+from difflib import Differ
+
+# NOTE(review): Differ.compare() takes exactly two sequences; this breaks
+# if more than two logs were collected -- confirm intended usage.
+# Print only the lines that differ between the two result sets.
+for x in Differ().compare(*logs):
+ if x[0]!=' ':
+ sys.stdout.write(x)
diff --git a/pypy/tool/pytest/pypy_test_failure_demo.py b/pypy/tool/pytest/pypy_test_failure_demo.py
--- a/pypy/tool/pytest/pypy_test_failure_demo.py
+++ b/pypy/tool/pytest/pypy_test_failure_demo.py
@@ -8,6 +8,10 @@
def test_interp_func(space):
assert space.is_true(space.w_None)
+def test_interp_reinterpret(space):
+ a = 1
+ assert a == 2
+
class TestInterpTest:
def test_interp_method(self):
assert self.space.is_true(self.space.w_False)
diff --git a/pypy/translator/c/src/cjkcodecs/cjkcodecs.h b/pypy/translator/c/src/cjkcodecs/cjkcodecs.h
--- a/pypy/translator/c/src/cjkcodecs/cjkcodecs.h
+++ b/pypy/translator/c/src/cjkcodecs/cjkcodecs.h
@@ -210,15 +210,15 @@
#define BEGIN_CODECS_LIST /* empty */
#define _CODEC(name) \
- static const MultibyteCodec _pypy_cjkcodec_##name; \
- const MultibyteCodec *pypy_cjkcodec_##name(void) { \
+ static MultibyteCodec _pypy_cjkcodec_##name; \
+ MultibyteCodec *pypy_cjkcodec_##name(void) { \
if (_pypy_cjkcodec_##name.codecinit != NULL) { \
int r = _pypy_cjkcodec_##name.codecinit(_pypy_cjkcodec_##name.config); \
assert(r == 0); \
} \
return &_pypy_cjkcodec_##name; \
} \
- static const MultibyteCodec _pypy_cjkcodec_##name
+ static MultibyteCodec _pypy_cjkcodec_##name
#define _STATEFUL_METHODS(enc) \
enc##_encode, \
enc##_encode_init, \
diff --git a/pypy/translator/c/src/cjkcodecs/multibytecodec.h b/pypy/translator/c/src/cjkcodecs/multibytecodec.h
--- a/pypy/translator/c/src/cjkcodecs/multibytecodec.h
+++ b/pypy/translator/c/src/cjkcodecs/multibytecodec.h
@@ -131,7 +131,7 @@
/* list of codecs defined in the .c files */
#define DEFINE_CODEC(name) \
- const MultibyteCodec *pypy_cjkcodec_##name(void);
+ MultibyteCodec *pypy_cjkcodec_##name(void);
// _codecs_cn
DEFINE_CODEC(gb2312)
More information about the pypy-commit
mailing list