[pypy-commit] pypy default: merge
devin.jeanpierre
pypy.commits at gmail.com
Thu Apr 28 17:07:09 EDT 2016
Author: Devin Jeanpierre <jeanpierreda at gmail.com>
Branch:
Changeset: r84017:0f0ab7f5334c
Date: 2016-04-28 13:54 -0700
http://bitbucket.org/pypy/pypy/changeset/0f0ab7f5334c/
Log: merge
diff too long, truncating to 2000 out of 12553 lines
diff --git a/TODO b/TODO
new file mode 100644
--- /dev/null
+++ b/TODO
@@ -0,0 +1,2 @@
+* reduce size of generated c code from slot definitions in slotdefs.
+* remove broken DEBUG_REFCOUNT from pyobject.py
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v<old> -r v<new> Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after <from> <to>` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create a integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py
--- a/lib_pypy/syslog.py
+++ b/lib_pypy/syslog.py
@@ -51,6 +51,8 @@
# if log is not opened, open it now
if not _S_log_open:
openlog()
+ if isinstance(message, unicode):
+ message = str(message)
lib.syslog(priority, "%s", message)
@builtinify
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -204,15 +204,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -222,39 +213,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withtypeversion", True),
- ]),
-
- BoolOption("withrangelist",
- "enable special range list implementation that does not "
- "actually create the full list until the resulting "
- "list is mutated",
- default=False),
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method cache ",
default=11),
@@ -265,22 +231,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -296,15 +250,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -317,16 +266,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -108,9 +108,9 @@
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
<function name="BaseFactory" />
</lcgdict>
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts <translate-pypy>`
+:source:`pypy/goal/` our main PyPy-translation scripts
live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter <interpreter>` and related objects
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
:doc:`Full details <cppyy>` are `available here <cppyy>`.
.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,8 +106,12 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
Dictionary Optimizations
~~~~~~~~~~~~~~~~~~~~~~~~
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
-Multi-dicts are a special implementation of dictionaries. It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime. Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that make it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled, by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
-This is now the default implementation of dictionaries in the Python interpreter.
+Identity Dicts
++++++++++++++++
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
+++++++++++++
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
The idea is the following: Most instances of the same class have very similar
attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
dicts:
the representation of the instance dict contains only a list of values.
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
List Optimizations
@@ -114,8 +120,8 @@
created. This gives the memory and speed behaviour of ``xrange`` and the generality
of use of ``range``, and makes ``xrange`` essentially useless.
-You can enable this feature with the :config:`objspace.std.withrangelist`
-option.
+This feature is enabled by default as part of the
+:config:`objspace.std.withliststrategies` option.
User Class Optimizations
@@ -133,8 +139,7 @@
base classes is changed). On subsequent lookups the cached version can be used,
as long as the instance did not shadow any of its classes attributes.
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
Interpreter Optimizations
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -6,3 +6,40 @@
.. startrev: aa60332382a1
.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046
+
+.. branch: gcheader-decl
+
+Reduce the size of generated C sources.
+
+
+.. branch: remove-objspace-options
+
+Remove a number of options from the build process that were never tested and
+never set. Fix a performance bug in the method cache.
+
+.. branch: bitstring
+
+JIT: use bitstrings to compress the lists of read or written descrs
+that we attach to EffectInfo. Fixes a problem we had in
+remove-objspace-options.
+
+.. branch: cpyext-for-merge
+Update cpyext C-API support:
+ - allow c-snippet tests to be run with -A so we can verify we are compatible
+ - fix many edge cases exposed by fixing tests to run with -A
+ - issequence() logic matches cpython
+ - make PyStringObject and PyUnicodeObject field names compatible with cpython
+ - add preliminary support for PyDateTime_*
+ - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy,
+ PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile,
+ - PyAnySet_CheckExact, PyUnicode_Concat
+ - improve support for PyGILState_Ensure, PyGILState_Release, and thread
+ primitives, also find a case where CPython will allow thread creation
+ before PyEval_InitThreads is run, disallow on PyPy
+ - create a PyObject-specific list strategy
+ - rewrite slot assignment for typeobjects
+ - improve tracking of PyObject to rpython object mapping
+ - support tp_as_{number, sequence, mapping, buffer} slots
+After this branch, we are almost able to support upstream numpy via cpyext, so
+we created (yet another) fork of numpy at github.com/pypy/numpy with the needed
+changes
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1176,7 +1176,27 @@
return self.w_False
def issequence_w(self, w_obj):
- return (self.findattr(w_obj, self.wrap("__getitem__")) is not None)
+ if self.is_oldstyle_instance(w_obj):
+ return (self.findattr(w_obj, self.wrap('__getitem__')) is not None)
+ flag = self.type(w_obj).flag_map_or_seq
+ if flag == 'M':
+ return False
+ elif flag == 'S':
+ return True
+ else:
+ return (self.lookup(w_obj, '__getitem__') is not None)
+
+ def ismapping_w(self, w_obj):
+ if self.is_oldstyle_instance(w_obj):
+ return (self.findattr(w_obj, self.wrap('__getitem__')) is not None)
+ flag = self.type(w_obj).flag_map_or_seq
+ if flag == 'M':
+ return True
+ elif flag == 'S':
+ return False
+ else:
+ return (self.lookup(w_obj, '__getitem__') is not None and
+ self.lookup(w_obj, '__getslice__') is None)
# The code below only works
# for the simple case (new-style instance).
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -214,6 +214,7 @@
self._trace(frame, 'exception', None, operationerr)
#operationerr.print_detailed_traceback(self.space)
+ @jit.dont_look_inside
@specialize.arg(1)
def sys_exc_info(self, for_hidden=False):
"""Implements sys.exc_info().
@@ -225,15 +226,7 @@
# NOTE: the result is not the wrapped sys.exc_info() !!!
"""
- frame = self.gettopframe()
- while frame:
- if frame.last_exception is not None:
- if ((for_hidden or not frame.hide()) or
- frame.last_exception is
- get_cleared_operation_error(self.space)):
- return frame.last_exception
- frame = frame.f_backref()
- return None
+ return self.gettopframe()._exc_info_unroll(self.space, for_hidden)
def set_sys_exc_info(self, operror):
frame = self.gettopframe_nohidden()
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -114,6 +114,7 @@
e.write_unraisable(self.space, "new_code_hook()")
def _initialize(self):
+ from pypy.objspace.std.mapdict import init_mapdict_cache
if self.co_cellvars:
argcount = self.co_argcount
assert argcount >= 0 # annotator hint
@@ -149,9 +150,7 @@
self._compute_flatcall()
- if self.space.config.objspace.std.withmapdict:
- from pypy.objspace.std.mapdict import init_mapdict_cache
- init_mapdict_cache(self)
+ init_mapdict_cache(self)
def _init_ready(self):
"This is a hook for the vmprof module, which overrides this method."
@@ -163,7 +162,10 @@
# When translating PyPy, freeze the file name
# <builtin>/lastdirname/basename.py
# instead of freezing the complete translation-time path.
- filename = self.co_filename.lstrip('<').rstrip('>')
+ filename = self.co_filename
+ if filename.startswith('<builtin>'):
+ return
+ filename = filename.lstrip('<').rstrip('>')
if filename.lower().endswith('.pyc'):
filename = filename[:-1]
basename = os.path.basename(filename)
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -4,7 +4,7 @@
from rpython.rlib import jit
from rpython.rlib.debug import make_sure_not_resized, check_nonneg
from rpython.rlib.jit import hint
-from rpython.rlib.objectmodel import we_are_translated, instantiate
+from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated
from rpython.rlib.rarithmetic import intmask, r_uint
from rpython.tool.pairtype import extendabletype
@@ -12,7 +12,8 @@
from pypy.interpreter.argument import Arguments
from pypy.interpreter.astcompiler import consts
from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import OperationError, oefmt
+from pypy.interpreter.error import (
+ OperationError, get_cleared_operation_error, oefmt)
from pypy.interpreter.executioncontext import ExecutionContext
from pypy.interpreter.nestedscope import Cell
from pypy.tool import stdlib_opcode
@@ -870,6 +871,22 @@
return space.wrap(self.builtin is not space.builtin)
return space.w_False
+ @jit.unroll_safe
+ @specialize.arg(2)
+ def _exc_info_unroll(self, space, for_hidden=False):
+ """Return the most recent OperationError being handled in the
+ call stack
+ """
+ frame = self
+ while frame:
+ last = frame.last_exception
+ if last is not None:
+ if last is get_cleared_operation_error(self.space):
+ break
+ if for_hidden or not frame.hide():
+ return last
+ frame = frame.f_backref()
+ return None
# ____________________________________________________________
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -739,25 +739,16 @@
unroller = SContinueLoop(startofloop)
return self.unrollstack_and_jump(unroller)
- @jit.unroll_safe
def RAISE_VARARGS(self, nbargs, next_instr):
space = self.space
if nbargs == 0:
- frame = self
- while frame:
- if frame.last_exception is not None:
- operror = frame.last_exception
- break
- frame = frame.f_backref()
- else:
- raise OperationError(space.w_TypeError,
- space.wrap("raise: no active exception to re-raise"))
- if operror.w_type is space.w_None:
- raise OperationError(space.w_TypeError,
- space.wrap("raise: the exception to re-raise was cleared"))
+ last_operr = self._exc_info_unroll(space, for_hidden=True)
+ if last_operr is None:
+ raise oefmt(space.w_TypeError,
+ "No active exception to reraise")
# re-raise, no new traceback obj will be attached
- self.last_exception = operror
- raise RaiseWithExplicitTraceback(operror)
+ self.last_exception = last_operr
+ raise RaiseWithExplicitTraceback(last_operr)
w_value = w_traceback = space.w_None
if nbargs >= 3:
@@ -951,8 +942,7 @@
def LOAD_ATTR(self, nameindex, next_instr):
"obj.attributename"
w_obj = self.popvalue()
- if (self.space.config.objspace.std.withmapdict
- and not jit.we_are_jitted()):
+ if not jit.we_are_jitted():
from pypy.objspace.std.mapdict import LOAD_ATTR_caching
w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex)
else:
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -98,175 +98,51 @@
# reason is that it is missing a place to store the __dict__, the slots,
# the weakref lifeline, and it typically has no interp-level __del__.
# So we create a few interp-level subclasses of W_XxxObject, which add
-# some combination of features.
-#
-# We don't build 2**4 == 16 subclasses for all combinations of requested
-# features, but limit ourselves to 6, chosen a bit arbitrarily based on
-# typical usage (case 1 is the most common kind of app-level subclasses;
-# case 2 is the memory-saving kind defined with __slots__).
-#
-# +----------------------------------------------------------------+
-# | NOTE: if withmapdict is enabled, the following doesn't apply! |
-# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to |
-# | show up only when needed. In particular there is no way with |
-# | mapdict to prevent some objects from being weakrefable. |
-# +----------------------------------------------------------------+
-#
-# dict slots del weakrefable
-#
-# 1. Y N N Y UserDictWeakref
-# 2. N Y N N UserSlots
-# 3. Y Y N Y UserDictWeakrefSlots
-# 4. N Y N Y UserSlotsWeakref
-# 5. Y Y Y Y UserDictWeakrefSlotsDel
-# 6. N Y Y Y UserSlotsWeakrefDel
-#
-# Note that if the app-level explicitly requests no dict, we should not
-# provide one, otherwise storing random attributes on the app-level
-# instance would unexpectedly work. We don't care too much, though, if
-# an object is weakrefable when it shouldn't really be. It's important
-# that it has a __del__ only if absolutely needed, as this kills the
-# performance of the GCs.
-#
-# Interp-level inheritance is like this:
-#
-# W_XxxObject base
-# / \
-# 1 2
-# / \
-# 3 4
-# / \
-# 5 6
+# some combination of features. This is done using mapdict.
-def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots,
- needsdel=False, weakrefable=False):
+# we need two subclasses of the app-level type, one to add mapdict, and then one
+# to add del to not slow down the GC.
+
+def get_unique_interplevel_subclass(config, cls, needsdel=False):
"NOT_RPYTHON: initialization-time only"
if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
needsdel = False
assert cls.typedef.acceptable_as_base_class
- key = config, cls, hasdict, wants_slots, needsdel, weakrefable
+ key = config, cls, needsdel
try:
return _subclass_cache[key]
except KeyError:
- subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel,
- weakrefable)
+ # XXX can save a class if cls already has a __del__
+ if needsdel:
+ cls = get_unique_interplevel_subclass(config, cls, False)
+ subcls = _getusercls(config, cls, needsdel)
assert key not in _subclass_cache
_subclass_cache[key] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
_subclass_cache = {}
-def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable):
+def _getusercls(config, cls, wants_del, reallywantdict=False):
+ from rpython.rlib import objectmodel
+ from pypy.objspace.std.mapdict import (BaseUserClassMapdict,
+ MapdictDictSupport, MapdictWeakrefSupport,
+ _make_storage_mixin_size_n)
typedef = cls.typedef
- if wants_dict and typedef.hasdict:
- wants_dict = False
- if config.objspace.std.withmapdict and not typedef.hasdict:
- # mapdict only works if the type does not already have a dict
- if wants_del:
- parentcls = get_unique_interplevel_subclass(config, cls, True, True,
- False, True)
- return _usersubclswithfeature(config, parentcls, "del")
- return _usersubclswithfeature(config, cls, "user", "dict", "weakref", "slots")
- # Forest of if's - see the comment above.
+ name = cls.__name__ + "User"
+
+ mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()]
+ if reallywantdict or not typedef.hasdict:
+ # the type has no dict, mapdict to provide the dict
+ mixins_needed.append(MapdictDictSupport)
+ name += "Dict"
+ if not typedef.weakrefable:
+ # the type does not support weakrefs yet, mapdict to provide weakref
+ # support
+ mixins_needed.append(MapdictWeakrefSupport)
+ name += "Weakrefable"
if wants_del:
- if wants_dict:
- # case 5. Parent class is 3.
- parentcls = get_unique_interplevel_subclass(config, cls, True, True,
- False, True)
- else:
- # case 6. Parent class is 4.
- parentcls = get_unique_interplevel_subclass(config, cls, False, True,
- False, True)
- return _usersubclswithfeature(config, parentcls, "del")
- elif wants_dict:
- if wants_slots:
- # case 3. Parent class is 1.
- parentcls = get_unique_interplevel_subclass(config, cls, True, False,
- False, True)
- return _usersubclswithfeature(config, parentcls, "slots")
- else:
- # case 1 (we need to add weakrefable unless it's already in 'cls')
- if not typedef.weakrefable:
- return _usersubclswithfeature(config, cls, "user", "dict", "weakref")
- else:
- return _usersubclswithfeature(config, cls, "user", "dict")
- else:
- if weakrefable and not typedef.weakrefable:
- # case 4. Parent class is 2.
- parentcls = get_unique_interplevel_subclass(config, cls, False, True,
- False, False)
- return _usersubclswithfeature(config, parentcls, "weakref")
- else:
- # case 2 (if the base is already weakrefable, case 2 == case 4)
- return _usersubclswithfeature(config, cls, "user", "slots")
-
-def _usersubclswithfeature(config, parentcls, *features):
- key = config, parentcls, features
- try:
- return _usersubclswithfeature_cache[key]
- except KeyError:
- subcls = _builduserclswithfeature(config, parentcls, *features)
- _usersubclswithfeature_cache[key] = subcls
- return subcls
-_usersubclswithfeature_cache = {}
-_allusersubcls_cache = {}
-
-def _builduserclswithfeature(config, supercls, *features):
- "NOT_RPYTHON: initialization-time only"
- name = supercls.__name__
- name += ''.join([name.capitalize() for name in features])
- body = {}
- #print '..........', name, '(', supercls.__name__, ')'
-
- def add(Proto):
- for key, value in Proto.__dict__.items():
- if (not key.startswith('__') and not key.startswith('_mixin_')
- or key == '__del__'):
- if hasattr(value, "func_name"):
- value = func_with_new_name(value, value.func_name)
- body[key] = value
-
- if (config.objspace.std.withmapdict and "dict" in features):
- from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin
- add(BaseMapdictObject)
- add(ObjectMixin)
- body["user_overridden_class"] = True
- features = ()
-
- if "user" in features: # generic feature needed by all subcls
-
- class Proto(object):
- user_overridden_class = True
-
- def getclass(self, space):
- return promote(self.w__class__)
-
- def setclass(self, space, w_subtype):
- # only used by descr_set___class__
- self.w__class__ = w_subtype
-
- def user_setup(self, space, w_subtype):
- self.space = space
- self.w__class__ = w_subtype
- self.user_setup_slots(w_subtype.layout.nslots)
-
- def user_setup_slots(self, nslots):
- assert nslots == 0
- add(Proto)
-
- if "weakref" in features:
- class Proto(object):
- _lifeline_ = None
- def getweakref(self):
- return self._lifeline_
- def setweakref(self, space, weakreflifeline):
- self._lifeline_ = weakreflifeline
- def delweakref(self):
- self._lifeline_ = None
- add(Proto)
-
- if "del" in features:
- parent_destructor = getattr(supercls, '__del__', None)
+ name += "Del"
+ parent_destructor = getattr(cls, '__del__', None)
def call_parent_del(self):
assert isinstance(self, subcls)
parent_destructor(self)
@@ -281,57 +157,16 @@
if parent_destructor is not None:
self.enqueue_for_destruction(self.space, call_parent_del,
'internal destructor of ')
- add(Proto)
+ mixins_needed.append(Proto)
- if "slots" in features:
- class Proto(object):
- slots_w = []
- def user_setup_slots(self, nslots):
- if nslots > 0:
- self.slots_w = [None] * nslots
- def setslotvalue(self, index, w_value):
- self.slots_w[index] = w_value
- def delslotvalue(self, index):
- if self.slots_w[index] is None:
- return False
- self.slots_w[index] = None
- return True
- def getslotvalue(self, index):
- return self.slots_w[index]
- add(Proto)
-
- if "dict" in features:
- base_user_setup = supercls.user_setup.im_func
- if "user_setup" in body:
- base_user_setup = body["user_setup"]
- class Proto(object):
- def getdict(self, space):
- return self.w__dict__
-
- def setdict(self, space, w_dict):
- self.w__dict__ = check_new_dictionary(space, w_dict)
-
- def user_setup(self, space, w_subtype):
- self.w__dict__ = space.newdict(
- instance=True)
- base_user_setup(self, space, w_subtype)
-
- add(Proto)
-
- subcls = type(name, (supercls,), body)
- _allusersubcls_cache[subcls] = True
+ class subcls(cls):
+ user_overridden_class = True
+ for base in mixins_needed:
+ objectmodel.import_from_mixin(base)
+ del subcls.base
+ subcls.__name__ = name
return subcls
-# a couple of helpers for the Proto classes above, factored out to reduce
-# the translated code size
-def check_new_dictionary(space, w_dict):
- if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting dictionary to a non-dict"))
- from pypy.objspace.std import dictmultiobject
- assert isinstance(w_dict, dictmultiobject.W_DictMultiObject)
- return w_dict
-check_new_dictionary._dont_inline_ = True
# ____________________________________________________________
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -87,7 +87,7 @@
howmany = get_len_of_range(space, start, stop, step)
- if space.config.objspace.std.withrangelist:
+ if space.config.objspace.std.withliststrategies:
return range_withspecialized_implementation(space, start,
step, howmany)
res_w = [None] * howmany
@@ -99,7 +99,7 @@
def range_withspecialized_implementation(space, start, step, length):
- assert space.config.objspace.std.withrangelist
+ assert space.config.objspace.std.withliststrategies
from pypy.objspace.std.listobject import make_range_list
return make_range_list(space, start, step, length)
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -185,12 +185,19 @@
class Cache:
def __init__(self, space):
- from pypy.interpreter.typedef import _usersubclswithfeature
- # evil
- self.cls_without_del = _usersubclswithfeature(
- space.config, W_InstanceObject, "dict", "weakref")
- self.cls_with_del = _usersubclswithfeature(
- space.config, self.cls_without_del, "del")
+ from pypy.interpreter.typedef import _getusercls
+
+ if hasattr(space, 'is_fake_objspace'):
+ # hack: with the fake objspace, we don't want to see typedef's
+ # _getusercls() at all
+ self.cls_without_del = W_InstanceObject
+ self.cls_with_del = W_InstanceObject
+ return
+
+ self.cls_without_del = _getusercls(
+ space.config, W_InstanceObject, False, reallywantdict=True)
+ self.cls_with_del = _getusercls(
+ space.config, W_InstanceObject, True, reallywantdict=True)
def class_descr_call(space, w_self, __args__):
diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py
--- a/pypy/module/__builtin__/test/test_builtin.py
+++ b/pypy/module/__builtin__/test/test_builtin.py
@@ -748,10 +748,6 @@
raises(TypeError, delattr, A(), 42)
-class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr):
- spaceconfig = {"objspace.std.getattributeshortcut": True}
-
-
class TestInternal:
def test_execfile(self, space):
fn = str(udir.join('test_execfile'))
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -1118,8 +1118,7 @@
assert getattr(c, u"x") == 1
-class AppTestOldStyleMapDict(AppTestOldstyle):
- spaceconfig = {"objspace.std.withmapdict": True}
+class AppTestOldStyleMapDict:
def setup_class(cls):
if cls.runappdirect:
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -110,9 +110,8 @@
'interp_magic.method_cache_counter')
self.extra_interpdef('reset_method_cache_counter',
'interp_magic.reset_method_cache_counter')
- if self.space.config.objspace.std.withmapdict:
- self.extra_interpdef('mapdict_cache_counter',
- 'interp_magic.mapdict_cache_counter')
+ self.extra_interpdef('mapdict_cache_counter',
+ 'interp_magic.mapdict_cache_counter')
PYC_MAGIC = get_pyc_magic(self.space)
self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC)
try:
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -37,17 +37,15 @@
cache = space.fromcache(MethodCache)
cache.misses = {}
cache.hits = {}
- if space.config.objspace.std.withmapdict:
- cache = space.fromcache(MapAttrCache)
- cache.misses = {}
- cache.hits = {}
+ cache = space.fromcache(MapAttrCache)
+ cache.misses = {}
+ cache.hits = {}
@unwrap_spec(name=str)
def mapdict_cache_counter(space, name):
"""Return a tuple (index_cache_hits, index_cache_misses) for lookups
in the mapdict cache with the given attribute name."""
assert space.config.objspace.std.withmethodcachecounter
- assert space.config.objspace.std.withmapdict
cache = space.fromcache(MapAttrCache)
return space.newtuple([space.newint(cache.hits.get(name, 0)),
space.newint(cache.misses.get(name, 0))])
diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py
--- a/pypy/module/__pypy__/test/test_special.py
+++ b/pypy/module/__pypy__/test/test_special.py
@@ -1,8 +1,7 @@
import py
class AppTest(object):
- spaceconfig = {"objspace.usemodules.select": False,
- "objspace.std.withrangelist": True}
+ spaceconfig = {"objspace.usemodules.select": False}
def setup_class(cls):
if cls.runappdirect:
@@ -61,6 +60,7 @@
import __pypy__
import sys
+ result = [False]
@__pypy__.hidden_applevel
def test_hidden_with_tb():
def not_hidden(): 1/0
@@ -69,9 +69,11 @@
assert sys.exc_info() == (None, None, None)
tb = __pypy__.get_hidden_tb()
assert tb.tb_frame.f_code.co_name == 'not_hidden'
- return True
+ result[0] = True
+ raise
else: return False
- assert test_hidden_with_tb()
+ raises(ZeroDivisionError, test_hidden_with_tb)
+ assert result[0]
def test_lookup_special(self):
from __pypy__ import lookup_special
diff --git a/pypy/module/_cffi_backend/wrapper.py b/pypy/module/_cffi_backend/wrapper.py
--- a/pypy/module/_cffi_backend/wrapper.py
+++ b/pypy/module/_cffi_backend/wrapper.py
@@ -92,7 +92,8 @@
return ctype._call(self.fnptr, args_w)
def descr_repr(self, space):
- return space.wrap("<FFIFunctionWrapper for %s()>" % (self.fnname,))
+ doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname)
+ return space.wrap("<FFIFunctionWrapper '%s'>" % (doc,))
def descr_get_doc(self, space):
doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname)
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -37,6 +37,8 @@
from rpython.tool.sourcetools import func_with_new_name
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib import rawrefcount
+from rpython.rlib import rthread
+from rpython.rlib.debug import fatalerror_notb
DEBUG_WRAPPER = True
@@ -85,11 +87,13 @@
FILEP = rffi.COpaquePtr('FILE')
if sys.platform == 'win32':
- fileno = rffi.llexternal('_fileno', [FILEP], rffi.INT)
+ dash = '_'
else:
- fileno = rffi.llexternal('fileno', [FILEP], rffi.INT)
-
+ dash = ''
+fileno = rffi.llexternal(dash + 'fileno', [FILEP], rffi.INT)
fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
+fdopen = rffi.llexternal(dash + 'fdopen', [rffi.INT, CONST_STRING],
+ FILEP, save_err=rffi.RFFI_SAVE_ERRNO)
_fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
def fclose(fp):
@@ -119,16 +123,18 @@
def is_valid_fp(fp):
return is_valid_fd(fileno(fp))
+pypy_decl = 'pypy_decl.h'
+
constant_names = """
Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER
-METH_COEXIST METH_STATIC METH_CLASS
+METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE
METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O
Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS
Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES
""".split()
for name in constant_names:
setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name))
-udir.join('pypy_decl.h').write("/* Will be filled later */\n")
+udir.join(pypy_decl).write("/* Will be filled later */\n")
udir.join('pypy_structmember_decl.h').write("/* Will be filled later */\n")
udir.join('pypy_macros.h').write("/* Will be filled later */\n")
globals().update(rffi_platform.configure(CConfig_constants))
@@ -144,7 +150,7 @@
target.chmod(0444) # make the file read-only, to make sure that nobody
# edits it by mistake
-def copy_header_files(dstdir):
+def copy_header_files(dstdir, copy_numpy_headers):
# XXX: 20 lines of code to recursively copy a directory, really??
assert dstdir.check(dir=True)
headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl')
@@ -152,6 +158,18 @@
headers.append(udir.join(name))
_copy_header_files(headers, dstdir)
+ if copy_numpy_headers:
+ try:
+ dstdir.mkdir('numpy')
+ except py.error.EEXIST:
+ pass
+ numpy_dstdir = dstdir / 'numpy'
+
+ numpy_include_dir = include_dir / 'numpy'
+ numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl')
+ _copy_header_files(numpy_headers, numpy_dstdir)
+
+
class NotSpecified(object):
pass
_NOT_SPECIFIED = NotSpecified()
@@ -177,6 +195,61 @@
# exceptions generate a OperationError(w_SystemError); and the funtion returns
# the error value specifed in the API.
#
+# Handling of the GIL
+# -------------------
+#
+# We add a global variable 'cpyext_glob_tid' that contains a thread
+# id. Invariant: this variable always contain 0 when the PyPy GIL is
+# released. It should also contain 0 when regular RPython code
+# executes. In non-cpyext-related code, it will thus always be 0.
+#
+# **make_generic_cpy_call():** RPython to C, with the GIL held. Before
+# the call, must assert that the global variable is 0 and set the
+# current thread identifier into the global variable. After the call,
+# assert that the global variable still contains the current thread id,
+# and reset it to 0.
+#
+# **make_wrapper():** C to RPython; by default assume that the GIL is
+# held, but accepts gil="acquire", "release", "around",
+# "pygilstate_ensure", "pygilstate_release".
+#
+# When a wrapper() is called:
+#
+# * "acquire": assert that the GIL is not currently held, i.e. the
+# global variable does not contain the current thread id (otherwise,
+# deadlock!). Acquire the PyPy GIL. After we acquired it, assert
+# that the global variable is 0 (it must be 0 according to the
+# invariant that it was 0 immediately before we acquired the GIL,
+# because the GIL was released at that point).
+#
+# * gil=None: we hold the GIL already. Assert that the current thread
+# identifier is in the global variable, and replace it with 0.
+#
+# * "pygilstate_ensure": if the global variable contains the current
+# thread id, replace it with 0 and set the extra arg to 0. Otherwise,
+# do the "acquire" and set the extra arg to 1. Then we'll call
+# pystate.py:PyGILState_Ensure() with this extra arg, which will do
+# the rest of the logic.
+#
+# When a wrapper() returns, first assert that the global variable is
+# still 0, and then:
+#
+# * "release": release the PyPy GIL. The global variable was 0 up to
+# and including at the point where we released the GIL, but afterwards
+# it is possible that the GIL is acquired by a different thread very
+# quickly.
+#
+# * gil=None: we keep holding the GIL. Set the current thread
+# identifier into the global variable.
+#
+# * "pygilstate_release": if the argument is PyGILState_UNLOCKED,
+# release the PyPy GIL; otherwise, set the current thread identifier
+# into the global variable. The rest of the logic of
+# PyGILState_Release() should be done before, in pystate.py.
+
+cpyext_glob_tid_ptr = lltype.malloc(rffi.CArray(lltype.Signed), 1,
+ flavor='raw', immortal=True, zero=True)
+
cpyext_namespace = NameManager('cpyext_')
@@ -196,6 +269,9 @@
argnames, varargname, kwargname = pycode.cpython_code_signature(callable.func_code)
assert argnames[0] == 'space'
+ if gil == 'pygilstate_ensure':
+ assert argnames[-1] == 'previous_state'
+ del argnames[-1]
self.argnames = argnames[1:]
assert len(self.argnames) == len(self.argtypes)
self.gil = gil
@@ -414,15 +490,14 @@
'PyThread_acquire_lock', 'PyThread_release_lock',
'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value',
'PyThread_get_key_value', 'PyThread_delete_key_value',
- 'PyThread_ReInitTLS',
+ 'PyThread_ReInitTLS', 'PyThread_init_thread',
+ 'PyThread_start_new_thread',
'PyStructSequence_InitType', 'PyStructSequence_New',
'PyStructSequence_UnnamedField',
'PyFunction_Type', 'PyMethod_Type', 'PyRange_Type', 'PyTraceBack_Type',
- 'PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS', '_PyArray_CopyInto',
-
'Py_DebugFlag', 'Py_VerboseFlag', 'Py_InteractiveFlag', 'Py_InspectFlag',
'Py_OptimizeFlag', 'Py_NoSiteFlag', 'Py_BytesWarningFlag', 'Py_UseClassExceptionsFlag',
'Py_FrozenFlag', 'Py_TabcheckFlag', 'Py_UnicodeFlag', 'Py_IgnoreEnvironmentFlag',
@@ -431,11 +506,11 @@
]
TYPES = {}
GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur
- '_Py_NoneStruct#': ('PyObject*', 'space.w_None'),
- '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'),
- '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'),
- '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'),
- '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'),
+ '_Py_NoneStruct#%s' % pypy_decl: ('PyObject*', 'space.w_None'),
+ '_Py_TrueStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_True'),
+ '_Py_ZeroStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_False'),
+ '_Py_NotImplementedStruct#%s' % pypy_decl: ('PyObject*', 'space.w_NotImplemented'),
+ '_Py_EllipsisObject#%s' % pypy_decl: ('PyObject*', 'space.w_Ellipsis'),
'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'),
}
FORWARD_DECLS = []
@@ -461,6 +536,7 @@
"PyUnicode_Type": "space.w_unicode",
"PyBaseString_Type": "space.w_basestring",
"PyDict_Type": "space.w_dict",
+ "PyDictProxy_Type": "cpyext.dictobject.make_frozendict(space)",
"PyTuple_Type": "space.w_tuple",
"PyList_Type": "space.w_list",
"PySet_Type": "space.w_set",
@@ -484,7 +560,7 @@
'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)',
'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)'
}.items():
- GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr)
+ GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr)
for cpyname in '''PyMethodObject PyListObject PyLongObject
PyDictObject PyClassObject'''.split():
@@ -602,7 +678,14 @@
fatal_value = callable.api_func.restype._defl()
gil_acquire = (gil == "acquire" or gil == "around")
gil_release = (gil == "release" or gil == "around")
- assert gil is None or gil_acquire or gil_release
+ pygilstate_ensure = (gil == "pygilstate_ensure")
+ pygilstate_release = (gil == "pygilstate_release")
+ assert (gil is None or gil_acquire or gil_release
+ or pygilstate_ensure or pygilstate_release)
+ deadlock_error = ("GIL deadlock detected when a CPython C extension "
+ "module calls %r" % (callable.__name__,))
+ no_gil_error = ("GIL not held when a CPython C extension "
+ "module calls %r" % (callable.__name__,))
@specialize.ll()
def wrapper(*args):
@@ -610,8 +693,27 @@
from pypy.module.cpyext.pyobject import as_pyobj
# we hope that malloc removal removes the newtuple() that is
# inserted exactly here by the varargs specializer
+
+ # see "Handling of the GIL" above (careful, we don't have the GIL here)
+ tid = rthread.get_or_make_ident()
if gil_acquire:
+ if cpyext_glob_tid_ptr[0] == tid:
+ fatalerror_notb(deadlock_error)
rgil.acquire()
+ assert cpyext_glob_tid_ptr[0] == 0
+ elif pygilstate_ensure:
+ from pypy.module.cpyext import pystate
+ if cpyext_glob_tid_ptr[0] == tid:
+ cpyext_glob_tid_ptr[0] = 0
+ args += (pystate.PyGILState_LOCKED,)
+ else:
+ rgil.acquire()
+ args += (pystate.PyGILState_UNLOCKED,)
+ else:
+ if cpyext_glob_tid_ptr[0] != tid:
+ fatalerror_notb(no_gil_error)
+ cpyext_glob_tid_ptr[0] = 0
+
rffi.stackcounter.stacks_counter += 1
llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py
retval = fatal_value
@@ -620,7 +722,8 @@
try:
if not we_are_translated() and DEBUG_WRAPPER:
print >>sys.stderr, callable,
- assert len(args) == len(callable.api_func.argtypes)
+ assert len(args) == (len(callable.api_func.argtypes) +
+ pygilstate_ensure)
for i, (typ, is_wrapped) in argtypes_enum_ui:
arg = args[i]
if is_PyObject(typ) and is_wrapped:
@@ -629,6 +732,8 @@
else:
arg_conv = arg
boxed_args += (arg_conv, )
+ if pygilstate_ensure:
+ boxed_args += (args[-1], )
state = space.fromcache(State)
try:
result = callable(space, *boxed_args)
@@ -688,8 +793,20 @@
pypy_debug_catch_fatal_exception()
assert False
rffi.stackcounter.stacks_counter -= 1
- if gil_release:
+
+ # see "Handling of the GIL" above
+ assert cpyext_glob_tid_ptr[0] == 0
+ if pygilstate_release:
+ from pypy.module.cpyext import pystate
+ arg = rffi.cast(lltype.Signed, args[-1])
+ unlock = (arg == pystate.PyGILState_UNLOCKED)
+ else:
+ unlock = gil_release
+ if unlock:
rgil.release()
+ else:
+ cpyext_glob_tid_ptr[0] = tid
+
return retval
callable._always_inline_ = 'try'
wrapper.__name__ = "wrapper for %r" % (callable, )
@@ -782,6 +899,9 @@
structindex = {}
for header, header_functions in FUNCTIONS_BY_HEADER.iteritems():
for name, func in header_functions.iteritems():
+ if not func:
+ # added only for the macro, not the decl
+ continue
restype, args = c_function_signature(db, func)
members.append('%s (*%s)(%s);' % (restype, name, args))
structindex[name] = len(structindex)
@@ -798,7 +918,7 @@
global_objects = []
for name, (typ, expr) in GLOBALS.iteritems():
- if "#" in name:
+ if '#' in name:
continue
if typ == 'PyDateTime_CAPI*':
continue
@@ -822,7 +942,7 @@
'\n' +
'\n'.join(functions))
- eci = build_eci(True, export_symbols, code)
+ eci = build_eci(True, export_symbols, code, use_micronumpy)
eci = eci.compile_shared_lib(
outputfilename=str(udir / "module_cache" / "pypyapi"))
modulename = py.path.local(eci.libraries[-1])
@@ -834,7 +954,7 @@
ob = rawrefcount.next_dead(PyObject)
if not ob:
break
- print ob
+ print 'deallocating PyObject', ob
decref(space, ob)
print 'dealloc_trigger DONE'
return "RETRY"
@@ -853,8 +973,8 @@
for name, (typ, expr) in GLOBALS.iteritems():
from pypy.module import cpyext # for the eval() below
w_obj = eval(expr)
- if name.endswith('#'):
- name = name[:-1]
+ if '#' in name:
+ name = name.split('#')[0]
isptr = False
else:
isptr = True
@@ -899,7 +1019,7 @@
# ctypes.c_void_p)
for header, header_functions in FUNCTIONS_BY_HEADER.iteritems():
for name, func in header_functions.iteritems():
- if name.startswith('cpyext_'): # XXX hack
+ if name.startswith('cpyext_') or func is None: # XXX hack
continue
pypyAPI[structindex[name]] = ctypes.cast(
ll2ctypes.lltype2ctypes(func.get_llhelper(space)),
@@ -952,6 +1072,8 @@
cpyext_type_init = self.cpyext_type_init
self.cpyext_type_init = None
for pto, w_type in cpyext_type_init:
+ if space.is_w(w_type, space.w_str):
+ pto.c_tp_itemsize = 1
finish_type_1(space, pto)
finish_type_2(space, pto, w_type)
@@ -969,10 +1091,14 @@
pypy_macros = []
renamed_symbols = []
for name in export_symbols:
- name = name.replace("#", "")
+ if '#' in name:
+ name,header = name.split('#')
+ else:
+ header = pypy_decl
newname = mangle_name(prefix, name)
assert newname, name
- pypy_macros.append('#define %s %s' % (name, newname))
+ if header == pypy_decl:
+ pypy_macros.append('#define %s %s' % (name, newname))
if name.startswith("PyExc_"):
pypy_macros.append('#define _%s _%s' % (name, newname))
renamed_symbols.append(newname)
@@ -1001,7 +1127,7 @@
# implement function callbacks and generate function decls
functions = []
decls = {}
- pypy_decls = decls['pypy_decl.h'] = []
+ pypy_decls = decls[pypy_decl] = []
pypy_decls.append('#define Signed long /* xxx temporary fix */\n')
pypy_decls.append('#define Unsigned unsigned long /* xxx temporary fix */\n')
@@ -1017,6 +1143,8 @@
header = decls[header_name]
for name, func in sorted(header_functions.iteritems()):
+ if not func:
+ continue
if header == DEFAULT_HEADER:
_name = name
else:
@@ -1042,12 +1170,15 @@
functions.append(header + '\n{return va_arg(*vp, %s);}\n' % name)
for name, (typ, expr) in GLOBALS.iteritems():
- if name.endswith('#'):
- name = name.replace("#", "")
+ if '#' in name:
+ name, header = name.split("#")
typ = typ.replace("*", "")
elif name.startswith('PyExc_'):
typ = 'PyObject*'
- pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name))
+ header = pypy_decl
+ if header != pypy_decl:
+ decls[header].append('#define %s %s' % (name, mangle_name(prefix, name)))
+ decls[header].append('PyAPI_DATA(%s) %s;' % (typ, name))
for header_name in FUNCTIONS_BY_HEADER.keys():
header = decls[header_name]
@@ -1075,9 +1206,10 @@
source_dir / "pysignals.c",
source_dir / "pythread.c",
source_dir / "missing.c",
+ source_dir / "pymem.c",
]
-def build_eci(building_bridge, export_symbols, code):
+def build_eci(building_bridge, export_symbols, code, use_micronumpy=False):
"NOT_RPYTHON"
# Build code and get pointer to the structure
kwds = {}
@@ -1099,9 +1231,11 @@
# Generate definitions for global structures
structs = ["#include <Python.h>"]
+ if use_micronumpy:
+ structs.append('#include <pypy_numpy.h> /* api.py line 1223 */')
for name, (typ, expr) in GLOBALS.iteritems():
- if name.endswith('#'):
- structs.append('%s %s;' % (typ[:-1], name[:-1]))
+ if '#' in name:
+ structs.append('%s %s;' % (typ[:-1], name.split('#')[0]))
elif name.startswith('PyExc_'):
structs.append('PyTypeObject _%s;' % (name,))
structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name))
@@ -1142,11 +1276,12 @@
use_micronumpy = space.config.objspace.usemodules.micronumpy
if not use_micronumpy:
return use_micronumpy
- # import to register api functions by side-effect
- import pypy.module.cpyext.ndarrayobject
- global GLOBALS, SYMBOLS_C, separate_module_files
- GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)")
- SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']
+ # import registers api functions by side-effect, we also need HEADER
+ from pypy.module.cpyext.ndarrayobject import HEADER
+ global GLOBALS, FUNCTIONS_BY_HEADER, separate_module_files
+ for func_name in ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']:
+ FUNCTIONS_BY_HEADER.setdefault(HEADER, {})[func_name] = None
+ GLOBALS["PyArray_Type#%s" % HEADER] = ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)")
separate_module_files.append(source_dir / "ndarrayobject.c")
return use_micronumpy
@@ -1156,14 +1291,18 @@
export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS)
from rpython.translator.c.database import LowLevelDatabase
db = LowLevelDatabase()
+ prefix = 'PyPy'
- generate_macros(export_symbols, prefix='PyPy')
+ generate_macros(export_symbols, prefix=prefix)
functions = generate_decls_and_callbacks(db, [], api_struct=False,
- prefix='PyPy')
- code = "#include <Python.h>\n" + "\n".join(functions)
+ prefix=prefix)
+ code = "#include <Python.h>\n"
+ if use_micronumpy:
+ code += "#include <pypy_numpy.h> /* api.py line 1290 */"
+ code += "\n".join(functions)
- eci = build_eci(False, export_symbols, code)
+ eci = build_eci(False, export_symbols, code, use_micronumpy)
space.fromcache(State).install_dll(eci)
@@ -1175,9 +1314,14 @@
lines = ['PyObject *pypy_static_pyobjs[] = {\n']
include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n']
for name, (typ, expr) in sorted(GLOBALS.items()):
- if name.endswith('#'):
+ if '#' in name:
+ name, header = name.split('#')
assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*')
- typ, name = typ[:-1], name[:-1]
+ typ = typ[:-1]
+ if header != pypy_decl:
+ # since the #define is not in pypy_macros, do it here
+ mname = mangle_name(prefix, name)
+ include_lines.append('#define %s %s\n' % (name, mname))
elif name.startswith('PyExc_'):
typ = 'PyTypeObject'
name = '_' + name
@@ -1204,6 +1348,8 @@
for header, header_functions in FUNCTIONS_BY_HEADER.iteritems():
for name, func in header_functions.iteritems():
+ if not func:
+ continue
newname = mangle_name('PyPy', name) or name
deco = entrypoint_lowlevel("cpyext", func.argtypes, newname,
relax=True)
@@ -1211,7 +1357,7 @@
setup_init_functions(eci, translating=True)
trunk_include = pypydir.dirpath() / 'include'
- copy_header_files(trunk_include)
+ copy_header_files(trunk_include, use_micronumpy)
def init_static_data_translated(space):
builder = space.fromcache(StaticObjectBuilder)
@@ -1348,10 +1494,17 @@
arg = as_pyobj(space, arg)
boxed_args += (arg,)
+ # see "Handling of the GIL" above
+ tid = rthread.get_ident()
+ assert cpyext_glob_tid_ptr[0] == 0
+ cpyext_glob_tid_ptr[0] = tid
+
try:
# Call the function
result = call_external_function(func, *boxed_args)
finally:
+ assert cpyext_glob_tid_ptr[0] == tid
+ cpyext_glob_tid_ptr[0] = 0
keepalive_until_here(*keepalives)
if is_PyObject(RESULT_TYPE):
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -2,11 +2,11 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.api import (
cpython_api, cpython_struct, bootstrap_function, build_type_checkers,
- PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL)
+ PyObjectFields, PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL)
from pypy.module.cpyext.pyerrors import PyErr_BadArgument
from pypy.module.cpyext.pyobject import (
PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference,
- make_typedescr, get_typedescr)
+ make_typedescr, get_typedescr, as_pyobj, Py_IncRef)
##
## Implementation of PyStringObject
@@ -27,7 +27,7 @@
## Solution
## --------
##
-## PyStringObject contains two additional members: the size and a pointer to a
+## PyStringObject contains two additional members: the ob_size and a pointer to a
## char buffer; it may be NULL.
##
## - A string allocated by pypy will be converted into a PyStringObject with a
@@ -36,7 +36,7 @@
##
## - A string allocated with PyString_FromStringAndSize(NULL, size) will
## allocate a PyStringObject structure, and a buffer with the specified
-## size, but the reference won't be stored in the global map; there is no
+## size+1, but the reference won't be stored in the global map; there is no
## corresponding object in pypy. When from_ref() or Py_INCREF() is called,
## the pypy string is created, and added to the global map of tracked
## objects. The buffer is then supposed to be immutable.
@@ -52,8 +52,8 @@
PyStringObjectStruct = lltype.ForwardReference()
PyStringObject = lltype.Ptr(PyStringObjectStruct)
-PyStringObjectFields = PyObjectFields + \
- (("buffer", rffi.CCHARP), ("size", Py_ssize_t))
+PyStringObjectFields = PyVarObjectFields + \
+ (("ob_shash", rffi.LONG), ("ob_sstate", rffi.INT), ("buffer", rffi.CCHARP))
cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct)
@bootstrap_function
@@ -78,10 +78,11 @@
py_str = rffi.cast(PyStringObject, py_obj)
buflen = length + 1
- py_str.c_size = length
+ py_str.c_ob_size = length
py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen,
flavor='raw', zero=True,
add_memory_pressure=True)
+ py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED
return py_str
def string_attach(space, py_obj, w_obj):
@@ -90,8 +91,10 @@
buffer must not be modified.
"""
py_str = rffi.cast(PyStringObject, py_obj)
- py_str.c_size = len(space.str_w(w_obj))
+ py_str.c_ob_size = len(space.str_w(w_obj))
py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO)
+ py_str.c_ob_shash = space.hash_w(w_obj)
+ py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
def string_realize(space, py_obj):
"""
@@ -99,8 +102,13 @@
be modified after this call.
"""
py_str = rffi.cast(PyStringObject, py_obj)
- s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size)
+ if not py_str.c_buffer:
+ py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, py_str.c_ob_size + 1,
+ flavor='raw', zero=True)
+ s = rffi.charpsize2str(py_str.c_buffer, py_str.c_ob_size)
w_obj = space.wrap(s)
+ py_str.c_ob_shash = space.hash_w(w_obj)
+ py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL
track_reference(space, py_obj, w_obj)
return w_obj
@@ -169,12 +177,12 @@
ref_str.c_buffer = rffi.str2charp(s)
buffer[0] = ref_str.c_buffer
if length:
- length[0] = ref_str.c_size
+ length[0] = ref_str.c_ob_size
else:
i = 0
while ref_str.c_buffer[i] != '\0':
i += 1
- if i != ref_str.c_size:
+ if i != ref_str.c_ob_size:
raise OperationError(space.w_TypeError, space.wrap(
"expected string without null bytes"))
return 0
@@ -183,7 +191,7 @@
def PyString_Size(space, ref):
if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str:
ref = rffi.cast(PyStringObject, ref)
- return ref.c_size
+ return ref.c_ob_size
else:
w_obj = from_ref(space, ref)
return space.len_w(w_obj)
@@ -212,7 +220,7 @@
ref[0] = lltype.nullptr(PyObject.TO)
raise
to_cp = newsize
- oldsize = py_str.c_size
+ oldsize = py_str.c_ob_size
if oldsize < newsize:
to_cp = oldsize
for i in range(to_cp):
@@ -236,15 +244,16 @@
if not ref[0]:
return
- if w_newpart is None or not PyString_Check(space, ref[0]) or \
- not PyString_Check(space, w_newpart):
+ if w_newpart is None or not PyString_Check(space, ref[0]) or not \
+ (space.isinstance_w(w_newpart, space.w_str) or
+ space.isinstance_w(w_newpart, space.w_unicode)):
Py_DecRef(space, ref[0])
ref[0] = lltype.nullptr(PyObject.TO)
return
w_str = from_ref(space, ref[0])
w_newstr = space.add(w_str, w_newpart)
- Py_DecRef(space, ref[0])
ref[0] = make_ref(space, w_newstr)
+ Py_IncRef(space, ref[0])
@cpython_api([PyObjectP, PyObject], lltype.Void)
def PyString_ConcatAndDel(space, ref, newpart):
diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py
--- a/pypy/module/cpyext/cdatetime.py
+++ b/pypy/module/cpyext/cdatetime.py
@@ -15,6 +15,7 @@
('DateTimeType', PyTypeObjectPtr),
('TimeType', PyTypeObjectPtr),
('DeltaType', PyTypeObjectPtr),
+ ('TZInfoType', PyTypeObjectPtr),
))
@cpython_api([], lltype.Ptr(PyDateTime_CAPI))
@@ -40,11 +41,21 @@
datetimeAPI.c_DeltaType = rffi.cast(
PyTypeObjectPtr, make_ref(space, w_type))
+ w_type = space.getattr(w_datetime, space.wrap("tzinfo"))
+ datetimeAPI.c_TZInfoType = rffi.cast(
+ PyTypeObjectPtr, make_ref(space, w_type))
+
return datetimeAPI
-PyDateTime_Date = PyObject
-PyDateTime_Time = PyObject
-PyDateTime_DateTime = PyObject
+PyDateTime_DateStruct = lltype.ForwardReference()
+PyDateTime_TimeStruct = lltype.ForwardReference()
+PyDateTime_DateTimeStruct = lltype.ForwardReference()
+cpython_struct("PyDateTime_Date", PyObjectFields, PyDateTime_DateStruct)
+PyDateTime_Date = lltype.Ptr(PyDateTime_DateStruct)
+cpython_struct("PyDateTime_Time", PyObjectFields, PyDateTime_TimeStruct)
+PyDateTime_Time = lltype.Ptr(PyDateTime_TimeStruct)
+cpython_struct("PyDateTime_DateTime", PyObjectFields, PyDateTime_DateTimeStruct)
+PyDateTime_DateTime = lltype.Ptr(PyDateTime_DateTimeStruct)
PyDeltaObjectStruct = lltype.ForwardReference()
cpython_struct("PyDateTime_Delta", PyObjectFields, PyDeltaObjectStruct)
@@ -81,6 +92,7 @@
make_check_function("PyDate_Check", "date")
make_check_function("PyTime_Check", "time")
make_check_function("PyDelta_Check", "timedelta")
+make_check_function("PyTZInfo_Check", "tzinfo")
# Constructors
diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py
--- a/pypy/module/cpyext/complexobject.py
+++ b/pypy/module/cpyext/complexobject.py
@@ -1,16 +1,51 @@
from rpython.rtyper.lltypesystem import lltype, rffi
-from pypy.module.cpyext.api import (
+from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function,
cpython_api, cpython_struct, PyObject, build_type_checkers)
+from pypy.module.cpyext.pyobject import (
+ make_typedescr, track_reference, from_ref)
from pypy.module.cpyext.floatobject import PyFloat_AsDouble
from pypy.objspace.std.complexobject import W_ComplexObject
from pypy.interpreter.error import OperationError
PyComplex_Check, PyComplex_CheckExact = build_type_checkers("Complex")
-Py_complex_t = lltype.ForwardReference()
+Py_complex_t = rffi.CStruct('Py_complex_t',
+ ('real', rffi.DOUBLE),
+ ('imag', rffi.DOUBLE),
+ hints={'size': 2 * rffi.sizeof(rffi.DOUBLE)})
Py_complex_ptr = lltype.Ptr(Py_complex_t)
-Py_complex_fields = (("real", rffi.DOUBLE), ("imag", rffi.DOUBLE))
-cpython_struct("Py_complex", Py_complex_fields, Py_complex_t)
+
+PyComplexObjectStruct = lltype.ForwardReference()
+PyComplexObject = lltype.Ptr(PyComplexObjectStruct)
+PyComplexObjectFields = PyObjectFields + \
+ (("cval", Py_complex_t),)
+cpython_struct("PyComplexObject", PyComplexObjectFields, PyComplexObjectStruct)
+
+ at bootstrap_function
+def init_complexobject(space):
+ "Type description of PyComplexObject"
+ make_typedescr(space.w_complex.layout.typedef,
+ basestruct=PyComplexObject.TO,
+ attach=complex_attach,
+ realize=complex_realize)
+
+def complex_attach(space, py_obj, w_obj):
+ """
+ Fills a newly allocated PyComplexObject with the given complex object. The
+ value must not be modified.
+ """
+ assert isinstance(w_obj, W_ComplexObject)
+ py_obj = rffi.cast(PyComplexObject, py_obj)
+ py_obj.c_cval.c_real = w_obj.realval
+ py_obj.c_cval.c_imag = w_obj.imagval
+
+def complex_realize(space, obj):
+ py_obj = rffi.cast(PyComplexObject, obj)
+ w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
+ w_obj = space.allocate_instance(W_ComplexObject, w_type)
+ w_obj.__init__(py_obj.c_cval.c_real, py_obj.c_cval.c_imag)
+ track_reference(space, obj, w_obj)
+ return w_obj
@cpython_api([lltype.Float, lltype.Float], PyObject)
diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py
--- a/pypy/module/cpyext/dictobject.py
+++ b/pypy/module/cpyext/dictobject.py
@@ -23,6 +23,7 @@
# NOTE: this works so far because all our dict strategies store
# *values* as full objects, which stay alive as long as the dict is
# alive and not modified. So we can return a borrowed ref.
+ # XXX this is wrong with IntMutableCell. Hope it works...
return w_res
@cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1)
@@ -62,6 +63,7 @@
# NOTE: this works so far because all our dict strategies store
# *values* as full objects, which stay alive as long as the dict is
# alive and not modified. So we can return a borrowed ref.
+ # XXX this is wrong with IntMutableCell. Hope it works...
return w_res
@cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1)
@@ -104,6 +106,32 @@
"""
return space.call_method(space.w_dict, "copy", w_obj)
+def _has_val(space, w_dict, w_key):
+ try:
+ w_val = space.getitem(w_dict, w_key)
+ except OperationError as e:
+ if e.match(space, space.w_KeyError):
+ return False
+ else:
+ raise
+ return True
+
+ at cpython_api([PyObject, PyObject, rffi.INT_real], rffi.INT_real, error=-1)
+def PyDict_Merge(space, w_a, w_b, override):
+ """Iterate over mapping object b adding key-value pairs to dictionary a.
More information about the pypy-commit
mailing list