[pypy-commit] pypy vmprof-newstack: merge default
fijal
pypy.commits at gmail.com
Fri Jan 8 02:53:34 EST 2016
Author: fijal
Branch: vmprof-newstack
Changeset: r81615:58ef1d3f7f09
Date: 2016-01-08 09:52 +0200
http://bitbucket.org/pypy/pypy/changeset/58ef1d3f7f09/
Log: merge default
diff too long, truncating to 2000 out of 34712 lines
diff --git a/.gitignore b/.gitignore
--- a/.gitignore
+++ b/.gitignore
@@ -29,4 +29,4 @@
release/
!pypy/tool/release/
rpython/_cache/
-__pycache__/
+.cache/
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -17,3 +17,4 @@
295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0
f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1
850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0
+5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -28,7 +28,7 @@
DEALINGS IN THE SOFTWARE.
-PyPy Copyright holders 2003-2015
+PyPy Copyright holders 2003-2016
-----------------------------------
Except when otherwise stated (look for LICENSE files or information at
@@ -56,14 +56,15 @@
Anders Chrigstrom
Eric van Riet Paap
Wim Lavrijsen
+ Richard Plangger
Richard Emslie
Alexander Schremmer
Dan Villiom Podlaski Christiansen
Lukas Diekmann
Sven Hager
Anders Lehmann
+ Remi Meier
Aurelien Campeas
- Remi Meier
Niklaus Haldimann
Camillo Bruni
Laura Creighton
@@ -87,7 +88,6 @@
Ludovic Aubry
Jacob Hallen
Jason Creighton
- Richard Plangger
Alex Martelli
Michal Bendowski
stian
@@ -200,9 +200,12 @@
Alex Perry
Vincent Legoll
Alan McIntyre
+ Spenser Bauman
Alexander Sedov
Attila Gobi
Christopher Pope
+ Devin Jeanpierre
+ Vaibhav Sood
Christian Tismer
Marc Abramowitz
Dan Stromberg
@@ -234,6 +237,7 @@
Lutz Paelike
Lucio Torre
Lars Wassermann
+ Philipp Rustemeuer
Henrik Vendelbo
Dan Buch
Miguel de Val Borro
@@ -244,6 +248,7 @@
Martin Blais
Lene Wagner
Tomo Cocoa
+ Kim Jin Su
Toni Mattis
Lucas Stadler
Julian Berman
@@ -253,6 +258,7 @@
Anna Katrina Dominguez
William Leslie
Bobby Impollonia
+ Faye Zhao
timo at eistee.fritz.box
Andrew Thompson
Yusei Tahara
@@ -283,6 +289,7 @@
shoma hosaka
Daniel Neuhäuser
Ben Mather
+ Niclas Olofsson
halgari
Boglarka Vezer
Chris Pressey
@@ -309,13 +316,16 @@
Stefan Marr
jiaaro
Mads Kiilerich
+ Richard Lancaster
opassembler.py
Antony Lee
+ Yaroslav Fedevych
Jim Hunziker
Markus Unterwaditzer
Even Wiik Thomassen
jbs
squeaky
+ Zearin
soareschen
Kurt Griffiths
Mike Bayer
@@ -327,6 +337,7 @@
Anna Ravencroft
Andrey Churin
Dan Crosta
+ Tobias Diaz
Julien Phalip
Roman Podoliaka
Dan Loewenherz
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
-all: pypy-c
+all: pypy-c cffi_imports
PYPY_EXECUTABLE := $(shell which pypy)
URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5")
@@ -10,6 +10,8 @@
RUNINTERP = $(PYPY_EXECUTABLE)
endif
+.PHONY: cffi_imports
+
pypy-c:
@echo
@echo "===================================================================="
@@ -36,3 +38,6 @@
# replaced with an opaque --jobserver option by the time this Makefile
# runs. We cannot get their original value either:
# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html
+
+cffi_imports:
+ PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py
diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py
--- a/lib-python/2.7/collections.py
+++ b/lib-python/2.7/collections.py
@@ -18,9 +18,9 @@
assert '__pypy__' not in _sys.builtin_module_names
newdict = lambda _ : {}
try:
- from __pypy__ import reversed_dict
+ from __pypy__ import reversed_dict as _reversed_dict
except ImportError:
- reversed_dict = lambda d: reversed(d.keys())
+ _reversed_dict = None # don't have ordered dicts
try:
from thread import get_ident as _get_ident
@@ -46,7 +46,7 @@
'''
def __reversed__(self):
- return reversed_dict(self)
+ return _reversed_dict(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
@@ -116,6 +116,178 @@
return ItemsView(self)
+def _compat_with_unordered_dicts():
+ # This returns the methods needed in OrderedDict in case the base
+ # 'dict' class is not actually ordered, like on top of CPython or
+ # old PyPy or PyPy-STM.
+
+ # ===== Original comments and code follows =====
+ # ===== The unmodified methods are not repeated =====
+
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as regular dictionaries.
+
+ # The internal self.__map dict maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. The signature is the same as
+ regular dictionaries, but keyword arguments are not recommended because
+ their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__root = root = [] # sentinel node
+ root[:] = [root, root, None]
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link at the end of the linked list,
+ # and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ root = self.__root
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ return dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which gets
+ # removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link_prev, link_next, _ = self.__map.pop(key)
+ link_prev[1] = link_next # update link_prev[NEXT]
+ link_next[0] = link_prev # update link_next[PREV]
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ # Traverse the linked list in order.
+ root = self.__root
+ curr = root[1] # start at the first node
+ while curr is not root:
+ yield curr[2] # yield the curr[KEY]
+ curr = curr[1] # move to next node
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ # Traverse the linked list in reverse order.
+ root = self.__root
+ curr = root[0] # start at the last node
+ while curr is not root:
+ yield curr[2] # yield the curr[KEY]
+ curr = curr[0] # move to previous node
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ root = self.__root
+ root[:] = [root, root, None]
+ self.__map.clear()
+ dict.clear(self)
+
+ # -- the following methods do not depend on the internal structure --
+
+ def keys(self):
+ 'od.keys() -> list of keys in od'
+ return list(self)
+
+ def values(self):
+ 'od.values() -> list of values in od'
+ return [self[key] for key in self]
+
+ def items(self):
+ 'od.items() -> list of (key, value) pairs in od'
+ return [(key, self[key]) for key in self]
+
+ def iterkeys(self):
+ 'od.iterkeys() -> an iterator over the keys in od'
+ return iter(self)
+
+ def itervalues(self):
+ 'od.itervalues -> an iterator over the values in od'
+ for k in self:
+ yield self[k]
+
+ def iteritems(self):
+ 'od.iteritems -> an iterator over the (key, value) pairs in od'
+ for k in self:
+ yield (k, self[k])
+
+ update = MutableMapping.update
+
+ __update = update # let subclasses override update without breaking __init__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
+ value. If key is not found, d is returned if given, otherwise KeyError
+ is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ key = next(reversed(self) if last else iter(self))
+ value = self.pop(key)
+ return key, value
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
+ If not specified, the value defaults to None.
+
+ '''
+ self = cls()
+ for key in iterable:
+ self[key] = value
+ return self
+
+ return locals()
+
+if _reversed_dict is None:
+ for _key, _value in _compat_with_unordered_dicts().items():
+ setattr(OrderedDict, _key, _value)
+ del _key, _value
+
################################################################################
### namedtuple
################################################################################
diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py
--- a/lib-python/2.7/json/encoder.py
+++ b/lib-python/2.7/json/encoder.py
@@ -8,13 +8,13 @@
def __init__(self):
self._builder = StringBuilder()
def append(self, string):
- try:
- self._builder.append(string)
- except UnicodeEncodeError:
+ if (isinstance(string, unicode) and
+ type(self._builder) is StringBuilder):
ub = UnicodeBuilder()
ub.append(self._builder.build())
self._builder = ub
- ub.append(string)
+ self.append = ub.append # shortcut only
+ self._builder.append(string)
def build(self):
return self._builder.build()
diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py
--- a/lib-python/2.7/pickle.py
+++ b/lib-python/2.7/pickle.py
@@ -1376,6 +1376,7 @@
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
+ This is overridden on PyPy by a RPython version that has linear complexity.
>>> decode_long('')
0L
@@ -1402,6 +1403,11 @@
n -= 1L << (nbytes * 8)
return n
+try:
+ from __pypy__ import decode_long
+except ImportError:
+ pass
+
# Shorthands
try:
diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py
--- a/lib-python/2.7/sysconfig.py
+++ b/lib-python/2.7/sysconfig.py
@@ -524,6 +524,13 @@
import _osx_support
_osx_support.customize_config_vars(_CONFIG_VARS)
+ # PyPy:
+ import imp
+ for suffix, mode, type_ in imp.get_suffixes():
+ if type_ == imp.C_EXTENSION:
+ _CONFIG_VARS['SOABI'] = suffix.split('.')[1]
+ break
+
if args:
vals = []
for name in args:
diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py
--- a/lib-python/2.7/uuid.py
+++ b/lib-python/2.7/uuid.py
@@ -604,21 +604,8 @@
def uuid4():
"""Generate a random UUID."""
-
- # When the system provides a version-4 UUID generator, use it.
- if _uuid_generate_random:
- _buffer = ctypes.create_string_buffer(16)
- _uuid_generate_random(_buffer)
- return UUID(bytes=_buffer.raw)
-
- # Otherwise, get randomness from urandom or the 'random' module.
- try:
- import os
- return UUID(bytes=os.urandom(16), version=4)
- except:
- import random
- bytes = [chr(random.randrange(256)) for i in range(16)]
- return UUID(bytes=bytes, version=4)
+ import os
+ return UUID(bytes=os.urandom(16), version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py
--- a/lib_pypy/cPickle.py
+++ b/lib_pypy/cPickle.py
@@ -167,7 +167,11 @@
try:
key = ord(self.read(1))
while key != STOP:
- self.dispatch[key](self)
+ try:
+ meth = self.dispatch[key]
+ except KeyError:
+ raise UnpicklingError("invalid load key, %r." % chr(key))
+ meth(self)
key = ord(self.read(1))
except TypeError:
if self.read(1) == '':
@@ -559,6 +563,7 @@
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
+ This is overridden on PyPy by a RPython version that has linear complexity.
>>> decode_long('')
0L
@@ -592,6 +597,11 @@
n -= 1L << (nbytes << 3)
return n
+try:
+ from __pypy__ import decode_long
+except ImportError:
+ pass
+
def load(f):
return Unpickler(f).load()
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.3.0
+Version: 1.4.2
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.3.0"
-__version_info__ = (1, 3, 0)
+__version__ = "1.4.2"
+__version_info__ = (1, 4, 2)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -146,7 +146,9 @@
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
-#define _CFFI_NUM_EXPORTS 25
+#define _cffi_call_python \
+ ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25])
+#define _CFFI_NUM_EXPORTS 26
typedef struct _ctypedescr CTypeDescrObject;
@@ -201,8 +203,11 @@
the others follow */
}
+/********** end CPython-specific section **********/
+#else
+_CFFI_UNUSED_FN
+static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *);
#endif
-/********** end CPython-specific section **********/
#define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0]))
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -72,6 +72,8 @@
self._cdefsources = []
self._included_ffis = []
self._windows_unicode = None
+ self._init_once_cache = {}
+ self._cdef_version = None
if hasattr(backend, 'set_ffi'):
backend.set_ffi(self)
for name in backend.__dict__:
@@ -104,6 +106,7 @@
raise TypeError("cdef() argument must be a string")
csource = csource.encode('ascii')
with self._lock:
+ self._cdef_version = object()
self._parser.parse(csource, override=override, packed=packed)
self._cdefsources.append(csource)
if override:
@@ -589,14 +592,39 @@
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
- def compile(self, tmpdir='.'):
+ def compile(self, tmpdir='.', verbose=0):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before compile()")
module_name, source, source_extension, kwds = self._assigned_source
return recompile(self, module_name, source, tmpdir=tmpdir,
- source_extension=source_extension, **kwds)
+ source_extension=source_extension,
+ compiler_verbose=verbose, **kwds)
+
+ def init_once(self, func, tag):
+ # Read _init_once_cache[tag], which is either (False, lock) if
+ # we're calling the function now in some thread, or (True, result).
+ # Don't call setdefault() in most cases, to avoid allocating and
+ # immediately freeing a lock; but still use setdefaut() to avoid
+ # races.
+ try:
+ x = self._init_once_cache[tag]
+ except KeyError:
+ x = self._init_once_cache.setdefault(tag, (False, allocate_lock()))
+ # Common case: we got (True, result), so we return the result.
+ if x[0]:
+ return x[1]
+ # Else, it's a lock. Acquire it to serialize the following tests.
+ with x[1]:
+ # Read again from _init_once_cache the current status.
+ x = self._init_once_cache[tag]
+ if x[0]:
+ return x[1]
+ # Call the function and store the result back.
+ result = func()
+ self._init_once_cache[tag] = (True, result)
+ return result
def _load_backend_lib(backend, name, flags):
@@ -620,70 +648,70 @@
import os
backend = ffi._backend
backendlib = _load_backend_lib(backend, libname, flags)
- copied_enums = []
#
- def make_accessor_locked(name):
+ def accessor_function(name):
key = 'function ' + name
- if key in ffi._parser._declarations:
- tp, _ = ffi._parser._declarations[key]
- BType = ffi._get_cached_btype(tp)
- try:
- value = backendlib.load_function(BType, name)
- except KeyError as e:
- raise AttributeError('%s: %s' % (name, e))
- library.__dict__[name] = value
+ tp, _ = ffi._parser._declarations[key]
+ BType = ffi._get_cached_btype(tp)
+ try:
+ value = backendlib.load_function(BType, name)
+ except KeyError as e:
+ raise AttributeError('%s: %s' % (name, e))
+ library.__dict__[name] = value
+ #
+ def accessor_variable(name):
+ key = 'variable ' + name
+ tp, _ = ffi._parser._declarations[key]
+ BType = ffi._get_cached_btype(tp)
+ read_variable = backendlib.read_variable
+ write_variable = backendlib.write_variable
+ setattr(FFILibrary, name, property(
+ lambda self: read_variable(BType, name),
+ lambda self, value: write_variable(BType, name, value)))
+ #
+ def accessor_constant(name):
+ raise NotImplementedError("non-integer constant '%s' cannot be "
+ "accessed from a dlopen() library" % (name,))
+ #
+ def accessor_int_constant(name):
+ library.__dict__[name] = ffi._parser._int_constants[name]
+ #
+ accessors = {}
+ accessors_version = [False]
+ #
+ def update_accessors():
+ if accessors_version[0] is ffi._cdef_version:
return
#
- key = 'variable ' + name
- if key in ffi._parser._declarations:
- tp, _ = ffi._parser._declarations[key]
- BType = ffi._get_cached_btype(tp)
- read_variable = backendlib.read_variable
- write_variable = backendlib.write_variable
- setattr(FFILibrary, name, property(
- lambda self: read_variable(BType, name),
- lambda self, value: write_variable(BType, name, value)))
- return
- #
- if not copied_enums:
- from . import model
- error = None
- for key, (tp, _) in ffi._parser._declarations.items():
- if not isinstance(tp, model.EnumType):
- continue
- try:
- tp.check_not_partial()
- except Exception as e:
- error = e
- continue
- for enumname, enumval in zip(tp.enumerators, tp.enumvalues):
- if enumname not in library.__dict__:
- library.__dict__[enumname] = enumval
- if error is not None:
- if name in library.__dict__:
- return # ignore error, about a different enum
- raise error
-
- for key, val in ffi._parser._int_constants.items():
- if key not in library.__dict__:
- library.__dict__[key] = val
-
- copied_enums.append(True)
- if name in library.__dict__:
- return
- #
- key = 'constant ' + name
- if key in ffi._parser._declarations:
- raise NotImplementedError("fetching a non-integer constant "
- "after dlopen()")
- #
- raise AttributeError(name)
+ from . import model
+ for key, (tp, _) in ffi._parser._declarations.items():
+ if not isinstance(tp, model.EnumType):
+ tag, name = key.split(' ', 1)
+ if tag == 'function':
+ accessors[name] = accessor_function
+ elif tag == 'variable':
+ accessors[name] = accessor_variable
+ elif tag == 'constant':
+ accessors[name] = accessor_constant
+ else:
+ for i, enumname in enumerate(tp.enumerators):
+ def accessor_enum(name, tp=tp, i=i):
+ tp.check_not_partial()
+ library.__dict__[name] = tp.enumvalues[i]
+ accessors[enumname] = accessor_enum
+ for name in ffi._parser._int_constants:
+ accessors.setdefault(name, accessor_int_constant)
+ accessors_version[0] = ffi._cdef_version
#
def make_accessor(name):
with ffi._lock:
if name in library.__dict__ or name in FFILibrary.__dict__:
return # added by another thread while waiting for the lock
- make_accessor_locked(name)
+ if name not in accessors:
+ update_accessors()
+ if name not in accessors:
+ raise AttributeError(name)
+ accessors[name](name)
#
class FFILibrary(object):
def __getattr__(self, name):
@@ -697,6 +725,10 @@
setattr(self, name, value)
else:
property.__set__(self, value)
+ def __dir__(self):
+ with ffi._lock:
+ update_accessors()
+ return accessors.keys()
#
if libname is not None:
try:
diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py
--- a/lib_pypy/cffi/cffi_opcode.py
+++ b/lib_pypy/cffi/cffi_opcode.py
@@ -54,6 +54,7 @@
OP_DLOPEN_FUNC = 35
OP_DLOPEN_CONST = 37
OP_GLOBAL_VAR_F = 39
+OP_EXTERN_PYTHON = 41
PRIM_VOID = 0
PRIM_BOOL = 1
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,6 +29,7 @@
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
+_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
@@ -62,7 +63,8 @@
if csource.startswith('*', endpos):
parts.append('('); closing += ')'
level = 0
- for i in xrange(endpos, len(csource)):
+ i = endpos
+ while i < len(csource):
c = csource[i]
if c == '(':
level += 1
@@ -73,11 +75,53 @@
elif c in ',;=':
if level == 0:
break
+ i += 1
csource = csource[endpos:i] + closing + csource[i:]
#print repr(''.join(parts)+csource)
parts.append(csource)
return ''.join(parts)
+def _preprocess_extern_python(csource):
+ # input: `extern "Python" int foo(int);` or
+ # `extern "Python" { int foo(int); }`
+ # output:
+ # void __cffi_extern_python_start;
+ # int foo(int);
+ # void __cffi_extern_python_stop;
+ parts = []
+ while True:
+ match = _r_extern_python.search(csource)
+ if not match:
+ break
+ endpos = match.end() - 1
+ #print
+ #print ''.join(parts)+csource
+ #print '=>'
+ parts.append(csource[:match.start()])
+ parts.append('void __cffi_extern_python_start; ')
+ if csource[endpos] == '{':
+ # grouping variant
+ closing = csource.find('}', endpos)
+ if closing < 0:
+ raise api.CDefError("'extern \"Python\" {': no '}' found")
+ if csource.find('{', endpos + 1, closing) >= 0:
+ raise NotImplementedError("cannot use { } inside a block "
+ "'extern \"Python\" { ... }'")
+ parts.append(csource[endpos+1:closing])
+ csource = csource[closing+1:]
+ else:
+ # non-grouping variant
+ semicolon = csource.find(';', endpos)
+ if semicolon < 0:
+ raise api.CDefError("'extern \"Python\": no ';' found")
+ parts.append(csource[endpos:semicolon+1])
+ csource = csource[semicolon+1:]
+ parts.append(' void __cffi_extern_python_stop;')
+ #print ''.join(parts)+csource
+ #print
+ parts.append(csource)
+ return ''.join(parts)
+
def _preprocess(csource):
# Remove comments. NOTE: this only work because the cdef() section
# should not contain any string literal!
@@ -101,8 +145,13 @@
csource = _r_stdcall2.sub(' volatile volatile const(', csource)
csource = _r_stdcall1.sub(' volatile volatile const ', csource)
csource = _r_cdecl.sub(' ', csource)
+ #
+ # Replace `extern "Python"` with start/end markers
+ csource = _preprocess_extern_python(csource)
+ #
# Replace "[...]" with "[__dotdotdotarray__]"
csource = _r_partial_array.sub('[__dotdotdotarray__]', csource)
+ #
# Replace "...}" with "__dotdotdotNUM__}". This construction should
# occur only at the end of enums; at the end of structs we have "...;}"
# and at the end of vararg functions "...);". Also replace "=...[,}]"
@@ -255,6 +304,7 @@
break
#
try:
+ self._inside_extern_python = False
for decl in iterator:
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
@@ -324,13 +374,19 @@
' #define %s %s'
% (key, key, key, value))
+ def _declare_function(self, tp, quals, decl):
+ tp = self._get_type_pointer(tp, quals)
+ if self._inside_extern_python:
+ self._declare('extern_python ' + decl.name, tp)
+ else:
+ self._declare('function ' + decl.name, tp)
+
def _parse_decl(self, decl):
node = decl.type
if isinstance(node, pycparser.c_ast.FuncDecl):
tp, quals = self._get_type_and_quals(node, name=decl.name)
assert isinstance(tp, model.RawFunctionType)
- tp = self._get_type_pointer(tp, quals)
- self._declare('function ' + decl.name, tp)
+ self._declare_function(tp, quals, decl)
else:
if isinstance(node, pycparser.c_ast.Struct):
self._get_struct_union_enum_type('struct', node)
@@ -346,8 +402,7 @@
tp, quals = self._get_type_and_quals(node,
partial_length_ok=True)
if tp.is_raw_function:
- tp = self._get_type_pointer(tp, quals)
- self._declare('function ' + decl.name, tp)
+ self._declare_function(tp, quals, decl)
elif (tp.is_integer_type() and
hasattr(decl, 'init') and
hasattr(decl.init, 'value') and
@@ -360,10 +415,23 @@
_r_int_literal.match(decl.init.expr.value)):
self._add_integer_constant(decl.name,
'-' + decl.init.expr.value)
- elif (quals & model.Q_CONST) and not tp.is_array_type:
- self._declare('constant ' + decl.name, tp, quals=quals)
+ elif (tp is model.void_type and
+ decl.name.startswith('__cffi_extern_python_')):
+ # hack: `extern "Python"` in the C source is replaced
+ # with "void __cffi_extern_python_start;" and
+ # "void __cffi_extern_python_stop;"
+ self._inside_extern_python = not self._inside_extern_python
+ assert self._inside_extern_python == (
+ decl.name == '__cffi_extern_python_start')
else:
- self._declare('variable ' + decl.name, tp, quals=quals)
+ if self._inside_extern_python:
+ raise api.CDefError(
+ "cannot declare constants or "
+ "variables with 'extern \"Python\"'")
+ if (quals & model.Q_CONST) and not tp.is_array_type:
+ self._declare('constant ' + decl.name, tp, quals=quals)
+ else:
+ self._declare('variable ' + decl.name, tp, quals=quals)
def parse_type(self, cdecl):
return self.parse_type_and_quals(cdecl)[0]
diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py
--- a/lib_pypy/cffi/ffiplatform.py
+++ b/lib_pypy/cffi/ffiplatform.py
@@ -17,15 +17,16 @@
def get_extension(srcfilename, modname, sources=(), **kwds):
from distutils.core import Extension
allsources = [srcfilename]
- allsources.extend(sources)
+ for src in sources:
+ allsources.append(os.path.normpath(src))
return Extension(name=modname, sources=allsources, **kwds)
-def compile(tmpdir, ext):
+def compile(tmpdir, ext, compiler_verbose=0):
"""Compile a C extension module using distutils."""
saved_environ = os.environ.copy()
try:
- outputfilename = _build(tmpdir, ext)
+ outputfilename = _build(tmpdir, ext, compiler_verbose)
outputfilename = os.path.abspath(outputfilename)
finally:
# workaround for a distutils bugs where some env vars can
@@ -35,10 +36,10 @@
os.environ[key] = value
return outputfilename
-def _build(tmpdir, ext):
+def _build(tmpdir, ext, compiler_verbose=0):
# XXX compact but horrible :-(
from distutils.core import Distribution
- import distutils.errors
+ import distutils.errors, distutils.log
#
dist = Distribution({'ext_modules': [ext]})
dist.parse_config_files()
@@ -48,7 +49,12 @@
options['build_temp'] = ('ffiplatform', tmpdir)
#
try:
- dist.run_command('build_ext')
+ old_level = distutils.log.set_threshold(0) or 0
+ try:
+ distutils.log.set_verbosity(compiler_verbose)
+ dist.run_command('build_ext')
+ finally:
+ distutils.log.set_threshold(old_level)
except (distutils.errors.CompileError,
distutils.errors.LinkError) as e:
raise VerificationError('%s: %s' % (e.__class__.__name__, e))
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -514,12 +514,17 @@
if self.baseinttype is not None:
return self.baseinttype.get_cached_btype(ffi, finishlist)
#
+ from . import api
if self.enumvalues:
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
- smallest_value = 0
- largest_value = 0
+ import warnings
+ warnings.warn("%r has no values explicitly defined; next version "
+ "will refuse to guess which integer type it is "
+ "meant to be (unsigned/signed, int/long)"
+ % self._get_c_name())
+ smallest_value = largest_value = 0
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h
--- a/lib_pypy/cffi/parse_c_type.h
+++ b/lib_pypy/cffi/parse_c_type.h
@@ -1,5 +1,6 @@
-/* See doc/misc/parse_c_type.rst in the source of CFFI for more information */
+/* This part is from file 'cffi/parse_c_type.h'. It is copied at the
+ beginning of C sources generated by CFFI's ffi.set_source(). */
typedef void *_cffi_opcode_t;
@@ -27,6 +28,7 @@
#define _CFFI_OP_DLOPEN_FUNC 35
#define _CFFI_OP_DLOPEN_CONST 37
#define _CFFI_OP_GLOBAL_VAR_F 39
+#define _CFFI_OP_EXTERN_PYTHON 41
#define _CFFI_PRIM_VOID 0
#define _CFFI_PRIM_BOOL 1
@@ -160,6 +162,12 @@
const char *error_message;
};
+struct _cffi_externpy_s {
+ const char *name;
+ size_t size_of_result;
+ void *reserved1, *reserved2;
+};
+
#ifdef _CFFI_INTERNAL
static int parse_c_type(struct _cffi_parse_info_s *info, const char *input);
static int search_in_globals(const struct _cffi_type_context_s *ctx,
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -118,6 +118,7 @@
class Recompiler:
+ _num_externpy = 0
def __init__(self, ffi, module_name, target_is_python=False):
self.ffi = ffi
@@ -356,7 +357,10 @@
else:
prnt(' NULL, /* no includes */')
prnt(' %d, /* num_types */' % (len(self.cffi_types),))
- prnt(' 0, /* flags */')
+ flags = 0
+ if self._num_externpy:
+ flags |= 1 # set to mean that we use extern "Python"
+ prnt(' %d, /* flags */' % flags)
prnt('};')
prnt()
#
@@ -366,6 +370,11 @@
prnt('PyMODINIT_FUNC')
prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,))
prnt('{')
+ if self._num_externpy:
+ prnt(' if (((intptr_t)p[0]) >= 0x0A03) {')
+ prnt(' _cffi_call_python = '
+ '(void(*)(struct _cffi_externpy_s *, char *))p[1];')
+ prnt(' }')
prnt(' p[0] = (const void *)%s;' % VERSION)
prnt(' p[1] = &_cffi_type_context;')
prnt('}')
@@ -1108,6 +1117,75 @@
GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index)))
# ----------
+ # extern "Python"
+
+ def _generate_cpy_extern_python_collecttype(self, tp, name):
+ assert isinstance(tp, model.FunctionPtrType)
+ self._do_collect_type(tp)
+
+ def _generate_cpy_extern_python_decl(self, tp, name):
+ prnt = self._prnt
+ if isinstance(tp.result, model.VoidType):
+ size_of_result = '0'
+ else:
+ context = 'result of %s' % name
+ size_of_result = '(int)sizeof(%s)' % (
+ tp.result.get_c_name('', context),)
+ prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name)
+ prnt(' { "%s", %s };' % (name, size_of_result))
+ prnt()
+ #
+ arguments = []
+ context = 'argument of %s' % name
+ for i, type in enumerate(tp.args):
+ arg = type.get_c_name(' a%d' % i, context)
+ arguments.append(arg)
+ #
+ repr_arguments = ', '.join(arguments)
+ repr_arguments = repr_arguments or 'void'
+ name_and_arguments = '%s(%s)' % (name, repr_arguments)
+ #
+ def may_need_128_bits(tp):
+ return (isinstance(tp, model.PrimitiveType) and
+ tp.name == 'long double')
+ #
+ size_of_a = max(len(tp.args)*8, 8)
+ if may_need_128_bits(tp.result):
+ size_of_a = max(size_of_a, 16)
+ if isinstance(tp.result, model.StructOrUnion):
+ size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
+ tp.result.get_c_name(''), size_of_a,
+ tp.result.get_c_name(''), size_of_a)
+ prnt('static %s' % tp.result.get_c_name(name_and_arguments))
+ prnt('{')
+ prnt(' char a[%s];' % size_of_a)
+ prnt(' char *p = a;')
+ for i, type in enumerate(tp.args):
+ arg = 'a%d' % i
+ if (isinstance(type, model.StructOrUnion) or
+ may_need_128_bits(type)):
+ arg = '&' + arg
+ type = model.PointerType(type)
+ prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg))
+ prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name)
+ if not isinstance(tp.result, model.VoidType):
+ prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),))
+ prnt('}')
+ prnt()
+ self._num_externpy += 1
+
+ def _generate_cpy_extern_python_ctx(self, tp, name):
+ if self.target_is_python:
+ raise ffiplatform.VerificationError(
+ "cannot use 'extern \"Python\"' in the ABI mode")
+ if tp.ellipsis:
+ raise NotImplementedError("a vararg function is extern \"Python\"")
+ type_index = self._typesdict[tp]
+ type_op = CffiOp(OP_EXTERN_PYTHON, type_index)
+ self._lsts["global"].append(
+ GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
+
+ # ----------
# emitting the opcodes for individual types
def _emit_bytecode_VoidType(self, tp, index):
@@ -1232,7 +1310,8 @@
return os.path.join(outputdir, *parts), parts
def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True,
- c_file=None, source_extension='.c', extradir=None, **kwds):
+ c_file=None, source_extension='.c', extradir=None,
+ compiler_verbose=1, **kwds):
if not isinstance(module_name, str):
module_name = module_name.encode('ascii')
if ffi._windows_unicode:
@@ -1252,7 +1331,7 @@
cwd = os.getcwd()
try:
os.chdir(tmpdir)
- outputfilename = ffiplatform.compile('.', ext)
+ outputfilename = ffiplatform.compile('.', ext, compiler_verbose)
finally:
os.chdir(cwd)
return outputfilename
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -21,6 +21,8 @@
import math as _math
import struct as _struct
+_SENTINEL = object()
+
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
@@ -31,6 +33,8 @@
MAXYEAR = 9999
_MINYEARFMT = 1900
+_MAX_DELTA_DAYS = 999999999
+
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
@@ -95,6 +99,15 @@
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
+_US_PER_US = 1
+_US_PER_MS = 1000
+_US_PER_SECOND = 1000000
+_US_PER_MINUTE = 60000000
+_SECONDS_PER_DAY = 24 * 3600
+_US_PER_HOUR = 3600000000
+_US_PER_DAY = 86400000000
+_US_PER_WEEK = 604800000000
+
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
@@ -271,15 +284,17 @@
def _check_int_field(value):
if isinstance(value, int):
- return value
+ return int(value)
if not isinstance(value, float):
try:
value = value.__int__()
except AttributeError:
pass
else:
- if isinstance(value, (int, long)):
- return value
+ if isinstance(value, int):
+ return int(value)
+ elif isinstance(value, long):
+ return int(long(value))
raise TypeError('__int__ method should return an integer')
raise TypeError('an integer is required')
raise TypeError('integer argument expected, got float')
@@ -344,75 +359,79 @@
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
-# This is a start at a struct tm workalike. Goals:
-#
-# + Works the same way across platforms.
-# + Handles all the fields datetime needs handled, without 1970-2038 glitches.
-#
-# Note: I suspect it's best if this flavor of tm does *not* try to
-# second-guess timezones or DST. Instead fold whatever adjustments you want
-# into the minutes argument (and the constructor will normalize).
+def _normalize_pair(hi, lo, factor):
+ if not 0 <= lo <= factor-1:
+ inc, lo = divmod(lo, factor)
+ hi += inc
+ return hi, lo
-class _tmxxx:
+def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False):
+ # Normalize all the inputs, and store the normalized values.
+ ss, us = _normalize_pair(ss, us, 1000000)
+ mm, ss = _normalize_pair(mm, ss, 60)
+ hh, mm = _normalize_pair(hh, mm, 60)
+ d, hh = _normalize_pair(d, hh, 24)
+ y, m, d = _normalize_date(y, m, d, ignore_overflow)
+ return y, m, d, hh, mm, ss, us
- ordinal = None
+def _normalize_date(year, month, day, ignore_overflow=False):
+ # That was easy. Now it gets muddy: the proper range for day
+ # can't be determined without knowing the correct month and year,
+ # but if day is, e.g., plus or minus a million, the current month
+ # and year values make no sense (and may also be out of bounds
+ # themselves).
+ # Saying 12 months == 1 year should be non-controversial.
+ if not 1 <= month <= 12:
+ year, month = _normalize_pair(year, month-1, 12)
+ month += 1
+ assert 1 <= month <= 12
- def __init__(self, year, month, day, hour=0, minute=0, second=0,
- microsecond=0):
- # Normalize all the inputs, and store the normalized values.
- if not 0 <= microsecond <= 999999:
- carry, microsecond = divmod(microsecond, 1000000)
- second += carry
- if not 0 <= second <= 59:
- carry, second = divmod(second, 60)
- minute += carry
- if not 0 <= minute <= 59:
- carry, minute = divmod(minute, 60)
- hour += carry
- if not 0 <= hour <= 23:
- carry, hour = divmod(hour, 24)
- day += carry
+ # Now only day can be out of bounds (year may also be out of bounds
+ # for a datetime object, but we don't care about that here).
+ # If day is out of bounds, what to do is arguable, but at least the
+ # method here is principled and explainable.
+ dim = _days_in_month(year, month)
+ if not 1 <= day <= dim:
+ # Move day-1 days from the first of the month. First try to
+ # get off cheap if we're only one day out of range (adjustments
+ # for timezone alone can't be worse than that).
+ if day == 0: # move back a day
+ month -= 1
+ if month > 0:
+ day = _days_in_month(year, month)
+ else:
+ year, month, day = year-1, 12, 31
+ elif day == dim + 1: # move forward a day
+ month += 1
+ day = 1
+ if month > 12:
+ month = 1
+ year += 1
+ else:
+ ordinal = _ymd2ord(year, month, 1) + (day - 1)
+ year, month, day = _ord2ymd(ordinal)
- # That was easy. Now it gets muddy: the proper range for day
- # can't be determined without knowing the correct month and year,
- # but if day is, e.g., plus or minus a million, the current month
- # and year values make no sense (and may also be out of bounds
- # themselves).
- # Saying 12 months == 1 year should be non-controversial.
- if not 1 <= month <= 12:
- carry, month = divmod(month-1, 12)
- year += carry
- month += 1
- assert 1 <= month <= 12
+ if not ignore_overflow and not MINYEAR <= year <= MAXYEAR:
+ raise OverflowError("date value out of range")
+ return year, month, day
- # Now only day can be out of bounds (year may also be out of bounds
- # for a datetime object, but we don't care about that here).
- # If day is out of bounds, what to do is arguable, but at least the
- # method here is principled and explainable.
- dim = _days_in_month(year, month)
- if not 1 <= day <= dim:
- # Move day-1 days from the first of the month. First try to
- # get off cheap if we're only one day out of range (adjustments
- # for timezone alone can't be worse than that).
- if day == 0: # move back a day
- month -= 1
- if month > 0:
- day = _days_in_month(year, month)
- else:
- year, month, day = year-1, 12, 31
- elif day == dim + 1: # move forward a day
- month += 1
- day = 1
- if month > 12:
- month = 1
- year += 1
- else:
- self.ordinal = _ymd2ord(year, month, 1) + (day - 1)
- year, month, day = _ord2ymd(self.ordinal)
-
- self.year, self.month, self.day = year, month, day
- self.hour, self.minute, self.second = hour, minute, second
- self.microsecond = microsecond
+def _accum(tag, sofar, num, factor, leftover):
+ if isinstance(num, (int, long)):
+ prod = num * factor
+ rsum = sofar + prod
+ return rsum, leftover
+ if isinstance(num, float):
+ fracpart, intpart = _math.modf(num)
+ prod = int(intpart) * factor
+ rsum = sofar + prod
+ if fracpart == 0.0:
+ return rsum, leftover
+ assert isinstance(factor, (int, long))
+ fracpart, intpart = _math.modf(factor * fracpart)
+ rsum += int(intpart)
+ return rsum, leftover + fracpart
+ raise TypeError("unsupported type for timedelta %s component: %s" %
+ (tag, type(num)))
class timedelta(object):
"""Represent the difference between two datetime objects.
@@ -433,100 +452,42 @@
"""
__slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
- def __new__(cls, days=0, seconds=0, microseconds=0,
- milliseconds=0, minutes=0, hours=0, weeks=0):
- # Doing this efficiently and accurately in C is going to be difficult
- # and error-prone, due to ubiquitous overflow possibilities, and that
- # C double doesn't have enough bits of precision to represent
- # microseconds over 10K years faithfully. The code here tries to make
- # explicit where go-fast assumptions can be relied on, in order to
- # guide the C implementation; it's way more convoluted than speed-
- # ignoring auto-overflow-to-long idiomatic Python could be.
+ def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL,
+ milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL):
+ x = 0
+ leftover = 0.0
+ if microseconds is not _SENTINEL:
+ x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover)
+ if milliseconds is not _SENTINEL:
+ x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover)
+ if seconds is not _SENTINEL:
+ x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover)
+ if minutes is not _SENTINEL:
+ x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover)
+ if hours is not _SENTINEL:
+ x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover)
+ if days is not _SENTINEL:
+ x, leftover = _accum("days", x, days, _US_PER_DAY, leftover)
+ if weeks is not _SENTINEL:
+ x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover)
+ if leftover != 0.0:
+ x += _round(leftover)
+ return cls._from_microseconds(x)
- # XXX Check that all inputs are ints, longs or floats.
+ @classmethod
+ def _from_microseconds(cls, us):
+ s, us = divmod(us, _US_PER_SECOND)
+ d, s = divmod(s, _SECONDS_PER_DAY)
+ return cls._create(d, s, us, False)
- # Final values, all integer.
- # s and us fit in 32-bit signed ints; d isn't bounded.
- d = s = us = 0
+ @classmethod
+ def _create(cls, d, s, us, normalize):
+ if normalize:
+ s, us = _normalize_pair(s, us, 1000000)
+ d, s = _normalize_pair(d, s, 24*3600)
- # Normalize everything to days, seconds, microseconds.
- days += weeks*7
- seconds += minutes*60 + hours*3600
- microseconds += milliseconds*1000
-
- # Get rid of all fractions, and normalize s and us.
- # Take a deep breath <wink>.
- if isinstance(days, float):
- dayfrac, days = _math.modf(days)
- daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
- assert daysecondswhole == int(daysecondswhole) # can't overflow
- s = int(daysecondswhole)
- assert days == int(days)
- d = int(days)
- else:
- daysecondsfrac = 0.0
- d = days
- assert isinstance(daysecondsfrac, float)
- assert abs(daysecondsfrac) <= 1.0
- assert isinstance(d, (int, long))
- assert abs(s) <= 24 * 3600
- # days isn't referenced again before redefinition
-
- if isinstance(seconds, float):
- secondsfrac, seconds = _math.modf(seconds)
- assert seconds == int(seconds)
- seconds = int(seconds)
- secondsfrac += daysecondsfrac
- assert abs(secondsfrac) <= 2.0
- else:
- secondsfrac = daysecondsfrac
- # daysecondsfrac isn't referenced again
- assert isinstance(secondsfrac, float)
- assert abs(secondsfrac) <= 2.0
-
- assert isinstance(seconds, (int, long))
- days, seconds = divmod(seconds, 24*3600)
- d += days
- s += int(seconds) # can't overflow
- assert isinstance(s, int)
- assert abs(s) <= 2 * 24 * 3600
- # seconds isn't referenced again before redefinition
-
- usdouble = secondsfrac * 1e6
- assert abs(usdouble) < 2.1e6 # exact value not critical
- # secondsfrac isn't referenced again
-
- if isinstance(microseconds, float):
- microseconds = _round(microseconds + usdouble)
- seconds, microseconds = divmod(microseconds, 1000000)
- days, seconds = divmod(seconds, 24*3600)
- d += days
- s += int(seconds)
- microseconds = int(microseconds)
- else:
- microseconds = int(microseconds)
- seconds, microseconds = divmod(microseconds, 1000000)
- days, seconds = divmod(seconds, 24*3600)
- d += days
- s += int(seconds)
- microseconds = _round(microseconds + usdouble)
- assert isinstance(s, int)
- assert isinstance(microseconds, int)
- assert abs(s) <= 3 * 24 * 3600
- assert abs(microseconds) < 3.1e6
-
- # Just a little bit of carrying possible for microseconds and seconds.
- seconds, us = divmod(microseconds, 1000000)
- s += seconds
- days, s = divmod(s, 24*3600)
- d += days
-
- assert isinstance(d, (int, long))
- assert isinstance(s, int) and 0 <= s < 24*3600
- assert isinstance(us, int) and 0 <= us < 1000000
-
- if abs(d) > 999999999:
- raise OverflowError("timedelta # of days is too large: %d" % d)
+ if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS:
+ raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS))
self = object.__new__(cls)
self._days = d
@@ -535,6 +496,10 @@
self._hashcode = -1
return self
+ def _to_microseconds(self):
+ return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND +
+ self._microseconds)
+
def __repr__(self):
module = "datetime." if self.__class__ is timedelta else ""
if self._microseconds:
@@ -562,8 +527,7 @@
def total_seconds(self):
"""Total seconds in the duration."""
- return ((self.days * 86400 + self.seconds) * 10**6 +
- self.microseconds) / 10**6
+ return self._to_microseconds() / 10**6
# Read-only field accessors
@property
@@ -585,36 +549,37 @@
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
- return timedelta(self._days + other._days,
- self._seconds + other._seconds,
- self._microseconds + other._microseconds)
+ return timedelta._create(self._days + other._days,
+ self._seconds + other._seconds,
+ self._microseconds + other._microseconds,
+ True)
return NotImplemented
- __radd__ = __add__
-
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
- return timedelta(self._days - other._days,
- self._seconds - other._seconds,
- self._microseconds - other._microseconds)
- return NotImplemented
-
- def __rsub__(self, other):
- if isinstance(other, timedelta):
- return -self + other
+ return timedelta._create(self._days - other._days,
+ self._seconds - other._seconds,
+ self._microseconds - other._microseconds,
+ True)
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
- return timedelta(-self._days,
- -self._seconds,
- -self._microseconds)
+ return timedelta._create(-self._days,
+ -self._seconds,
+ -self._microseconds,
+ True)
def __pos__(self):
- return self
+ # for CPython compatibility, we cannot use
+ # our __class__ here, but need a real timedelta
+ return timedelta._create(self._days,
+ self._seconds,
+ self._microseconds,
+ False)
def __abs__(self):
if self._days < 0:
@@ -623,25 +588,18 @@
return self
def __mul__(self, other):
- if isinstance(other, (int, long)):
- # for CPython compatibility, we cannot use
- # our __class__ here, but need a real timedelta
- return timedelta(self._days * other,
- self._seconds * other,
- self._microseconds * other)
- return NotImplemented
+ if not isinstance(other, (int, long)):
+ return NotImplemented
+ usec = self._to_microseconds()
+ return timedelta._from_microseconds(usec * other)
__rmul__ = __mul__
- def _to_microseconds(self):
- return ((self._days * (24*3600) + self._seconds) * 1000000 +
- self._microseconds)
-
def __div__(self, other):
if not isinstance(other, (int, long)):
return NotImplemented
usec = self._to_microseconds()
- return timedelta(0, 0, usec // other)
+ return timedelta._from_microseconds(usec // other)
__floordiv__ = __div__
@@ -705,9 +663,8 @@
def __reduce__(self):
return (self.__class__, self._getstate())
-timedelta.min = timedelta(-999999999)
-timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
- microseconds=999999)
+timedelta.min = timedelta(-_MAX_DELTA_DAYS)
+timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1)
timedelta.resolution = timedelta(microseconds=1)
class date(object):
@@ -948,32 +905,29 @@
# Computations
- def _checkOverflow(self, year):
- if not MINYEAR <= year <= MAXYEAR:
- raise OverflowError("date +/-: result year %d not in %d..%d" %
- (year, MINYEAR, MAXYEAR))
+ def _add_timedelta(self, other, factor):
+ y, m, d = _normalize_date(
+ self._year,
+ self._month,
+ self._day + other.days * factor)
+ return date(y, m, d)
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
- t = _tmxxx(self._year,
- self._month,
- self._day + other.days)
- self._checkOverflow(t.year)
- result = date(t.year, t.month, t.day)
- return result
+ return self._add_timedelta(other, 1)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
- if isinstance(other, timedelta):
- return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
- return timedelta(days1 - days2)
+ return timedelta._create(days1 - days2, 0, 0, False)
+ if isinstance(other, timedelta):
+ return self._add_timedelta(other, -1)
return NotImplemented
def weekday(self):
@@ -1340,7 +1294,7 @@
offset = self._tzinfo.utcoffset(None)
offset = _check_utc_offset("utcoffset", offset)
if offset is not None:
- offset = timedelta(minutes=offset)
+ offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
@@ -1378,7 +1332,7 @@
offset = self._tzinfo.dst(None)
offset = _check_utc_offset("dst", offset)
if offset is not None:
- offset = timedelta(minutes=offset)
+ offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
@@ -1505,18 +1459,24 @@
A timezone info object may be passed in as well.
"""
+ _check_tzinfo_arg(tz)
+ converter = _time.localtime if tz is None else _time.gmtime
+ self = cls._from_timestamp(converter, timestamp, tz)
+ if tz is not None:
+ self = tz.fromutc(self)
+ return self
- _check_tzinfo_arg(tz)
+ @classmethod
+ def utcfromtimestamp(cls, t):
+ "Construct a UTC datetime from a POSIX timestamp (like time.time())."
+ return cls._from_timestamp(_time.gmtime, t, None)
- converter = _time.localtime if tz is None else _time.gmtime
-
- if isinstance(timestamp, int):
- us = 0
- else:
- t_full = timestamp
- timestamp = int(_math.floor(timestamp))
- frac = t_full - timestamp
- us = _round(frac * 1e6)
+ @classmethod
+ def _from_timestamp(cls, converter, timestamp, tzinfo):
+ t_full = timestamp
+ timestamp = int(_math.floor(timestamp))
+ frac = t_full - timestamp
+ us = _round(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
@@ -1527,32 +1487,7 @@
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
- result = cls(y, m, d, hh, mm, ss, us, tz)
- if tz is not None:
- result = tz.fromutc(result)
- return result
-
- @classmethod
- def utcfromtimestamp(cls, t):
- "Construct a UTC datetime from a POSIX timestamp (like time.time())."
- if isinstance(t, int):
- us = 0
- else:
- t_full = t
- t = int(_math.floor(t))
- frac = t_full - t
- us = _round(frac * 1e6)
-
- # If timestamp is less than one microsecond smaller than a
- # full second, us can be rounded up to 1000000. In this case,
- # roll over to seconds, otherwise, ValueError is raised
- # by the constructor.
- if us == 1000000:
- t += 1
- us = 0
- y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
- ss = min(ss, 59) # clamp out leap seconds if the platform has them
- return cls(y, m, d, hh, mm, ss, us)
+ return cls(y, m, d, hh, mm, ss, us, tzinfo)
@classmethod
def now(cls, tz=None):
@@ -1594,9 +1529,9 @@
hh, mm, ss = self.hour, self.minute, self.second
offset = self._utcoffset()
if offset: # neither None nor 0
- tm = _tmxxx(y, m, d, hh, mm - offset)
- y, m, d = tm.year, tm.month, tm.day
- hh, mm = tm.hour, tm.minute
+ mm -= offset
+ y, m, d, hh, mm, ss, _ = _normalize_datetime(
+ y, m, d, hh, mm, ss, 0, ignore_overflow=True)
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
@@ -1730,7 +1665,7 @@
offset = self._tzinfo.utcoffset(self)
offset = _check_utc_offset("utcoffset", offset)
if offset is not None:
- offset = timedelta(minutes=offset)
+ offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
@@ -1768,7 +1703,7 @@
offset = self._tzinfo.dst(self)
offset = _check_utc_offset("dst", offset)
if offset is not None:
- offset = timedelta(minutes=offset)
+ offset = timedelta._create(0, offset * 60, 0, True)
return offset
# Return an integer (or None) instead of a timedelta (or None).
@@ -1859,22 +1794,22 @@
return -1
return diff and 1 or 0
+ def _add_timedelta(self, other, factor):
+ y, m, d, hh, mm, ss, us = _normalize_datetime(
+ self._year,
+ self._month,
+ self._day + other.days * factor,
+ self._hour,
+ self._minute,
+ self._second + other.seconds * factor,
+ self._microsecond + other.microseconds * factor)
+ return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo)
+
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
- t = _tmxxx(self._year,
- self._month,
- self._day + other.days,
- self._hour,
- self._minute,
- self._second + other.seconds,
- self._microsecond + other.microseconds)
- self._checkOverflow(t.year)
- result = datetime(t.year, t.month, t.day,
- t.hour, t.minute, t.second,
- t.microsecond, tzinfo=self._tzinfo)
- return result
+ return self._add_timedelta(other, 1)
__radd__ = __add__
@@ -1882,16 +1817,15 @@
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
- return self + -other
+ return self._add_timedelta(other, -1)
return NotImplemented
- days1 = self.toordinal()
- days2 = other.toordinal()
- secs1 = self._second + self._minute * 60 + self._hour * 3600
- secs2 = other._second + other._minute * 60 + other._hour * 3600
- base = timedelta(days1 - days2,
- secs1 - secs2,
- self._microsecond - other._microsecond)
+ delta_d = self.toordinal() - other.toordinal()
+ delta_s = (self._hour - other._hour) * 3600 + \
+ (self._minute - other._minute) * 60 + \
+ (self._second - other._second)
+ delta_us = self._microsecond - other._microsecond
+ base = timedelta._create(delta_d, delta_s, delta_us, True)
if self._tzinfo is other._tzinfo:
return base
myoff = self._utcoffset()
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -88,9 +88,19 @@
#
try:
unbound_method = getattr(_continulet, methodname)
+ _tls.leaving = current
args, kwds = unbound_method(current, *baseargs, to=target)
- finally:
_tls.current = current
+ except:
+ _tls.current = current
+ if hasattr(_tls, 'trace'):
+ _run_trace_callback('throw')
+ _tls.leaving = None
+ raise
+ else:
+ if hasattr(_tls, 'trace'):
+ _run_trace_callback('switch')
+ _tls.leaving = None
#
if kwds:
if args:
@@ -122,6 +132,34 @@
return f.f_back.f_back.f_back # go past start(), __switch(), switch()
# ____________________________________________________________
+# Recent additions
+
+GREENLET_USE_GC = True
+GREENLET_USE_TRACING = True
+
+def gettrace():
+ return getattr(_tls, 'trace', None)
+
+def settrace(callback):
+ try:
+ prev = _tls.trace
+ del _tls.trace
+ except AttributeError:
+ prev = None
+ if callback is not None:
+ _tls.trace = callback
+ return prev
+
+def _run_trace_callback(event):
+ try:
+ _tls.trace(event, (_tls.leaving, _tls.current))
+ except:
+ # In case of exceptions trace function is removed
+ if hasattr(_tls, 'trace'):
+ del _tls.trace
+ raise
+
+# ____________________________________________________________
# Internal stuff
try:
@@ -143,22 +181,32 @@
_tls.current = gmain
def _greenlet_start(greenlet, args):
- args, kwds = args
- _tls.current = greenlet
try:
- res = greenlet.run(*args, **kwds)
- except GreenletExit, e:
- res = e
+ args, kwds = args
+ _tls.current = greenlet
+ try:
+ if hasattr(_tls, 'trace'):
+ _run_trace_callback('switch')
+ res = greenlet.run(*args, **kwds)
+ except GreenletExit, e:
+ res = e
+ finally:
+ _continuation.permute(greenlet, greenlet.parent)
+ return ((res,), None)
finally:
- _continuation.permute(greenlet, greenlet.parent)
- return ((res,), None)
+ _tls.leaving = greenlet
def _greenlet_throw(greenlet, exc, value, tb):
- _tls.current = greenlet
try:
- raise exc, value, tb
- except GreenletExit, e:
- res = e
+ _tls.current = greenlet
+ try:
+ if hasattr(_tls, 'trace'):
+ _run_trace_callback('throw')
+ raise exc, value, tb
+ except GreenletExit, e:
+ res = e
+ finally:
+ _continuation.permute(greenlet, greenlet.parent)
+ return ((res,), None)
finally:
- _continuation.permute(greenlet, greenlet.parent)
- return ((res,), None)
+ _tls.leaving = greenlet
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -73,28 +73,36 @@
lzma (PyPy3 only)
liblzma
-sqlite3
- libsqlite3
-
-curses
- libncurses + cffi dependencies from above
-
pyexpat
libexpat1
_ssl
libssl
+Make sure to have these libraries (with development headers) installed
+before building PyPy, otherwise the resulting binary will not contain
+these modules. Furthermore, the following libraries should be present
+after building PyPy, otherwise the corresponding CFFI modules are not
+built (you can run or re-run `pypy/tool/release/package.py` to retry
+to build them; you don't need to re-translate the whole PyPy):
+
+sqlite3
+ libsqlite3
+
+curses
+ libncurses
+
gdbm
libgdbm-dev
-Make sure to have these libraries (with development headers) installed before
-building PyPy, otherwise the resulting binary will not contain these modules.
+tk
+ tk-dev
On Debian, this is the command to install all build-time dependencies::
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
- libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev
+ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
+ tk-dev
For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
@@ -102,6 +110,7 @@
yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
+ (XXX plus the Fedora version of libgdbm-dev and tk-dev)
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
@@ -110,6 +119,7 @@
zypper install gcc make python-devel pkg-config \
zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \
libexpat-devel libffi-devel python-curses
+ (XXX plus the SLES11 version of libgdbm-dev and tk-dev)
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
@@ -125,11 +135,13 @@
Translate with JIT::
- pypy rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone.py
+ cd pypy/goal
+ pypy ../../rpython/bin/rpython --opt=jit
Translate without JIT::
- pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py
+ cd pypy/goal
+ pypy ../../rpython/bin/rpython --opt=2
(You can use ``python`` instead of ``pypy`` here, which will take longer
but works too.)
@@ -138,8 +150,16 @@
current directory. The executable behaves mostly like a normal Python
interpreter (see :doc:`cpython_differences`).
+Build cffi import libraries for the stdlib
+------------------------------------------
-.. _translate-pypy:
+Various stdlib modules require a separate build step to create the cffi
More information about the pypy-commit
mailing list