[pypy-commit] pypy win32-fixes5: merge default into branch
mattip
noreply at buildbot.pypy.org
Wed Sep 24 15:32:16 CEST 2014
Author: mattip <matti.picus at gmail.com>
Branch: win32-fixes5
Changeset: r73672:6a05a50e294b
Date: 2014-09-24 16:31 +0300
http://bitbucket.org/pypy/pypy/changeset/6a05a50e294b/
Log: merge default into branch
diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py
--- a/lib_pypy/_curses.py
+++ b/lib_pypy/_curses.py
@@ -286,6 +286,13 @@
lib = ffi.verify("""
+#ifdef __APPLE__
+/* the following define is necessary for OS X 10.6+; without it, the
+ Apple-supplied ncurses.h sets NCURSES_OPAQUE to 1, and then Python
+ can't get at the WINDOW flags field. */
+#define NCURSES_OPAQUE 0
+#endif
+
#include <ncurses.h>
#include <panel.h>
#include <term.h>
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -1242,7 +1242,7 @@
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
- raise TypeError("cannot compare naive and aware times")
+ raise TypeError("can't compare offset-naive and offset-aware times")
myhhmm = self._hour * 60 + self._minute - myoff
othhmm = other._hour * 60 + other._minute - otoff
return _cmp((myhhmm, self._second, self._microsecond),
@@ -1838,7 +1838,7 @@
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
- raise TypeError("cannot compare naive and aware datetimes")
+ raise TypeError("can't compare offset-naive and offset-aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
@@ -1885,7 +1885,7 @@
if myoff == otoff:
return base
if myoff is None or otoff is None:
- raise TypeError("cannot mix naive and timezone-aware time")
+ raise TypeError("can't subtract offset-naive and offset-aware datetimes")
return base + timedelta(minutes = otoff-myoff)
def __hash__(self):
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -38,14 +38,16 @@
no JIT: windows, linux, os/x
sandbox: linux, os/x
+* repackage and upload source tar.bz2 to bitbucket and to cobra, as some packagers
+ prefer a clearly labeled source package
* write release announcement pypy/doc/release-x.y(.z).txt
the release announcement should contain a direct link to the download page
* update pypy.org (under extradoc/pypy.org), rebuild and commit
* post announcement on morepypy.blogspot.com
-* send announcements to pypy-dev, python-list,
+* send announcements to twitter.com, pypy-dev, python-list,
python-announce, python-dev ...
* add a tag on the pypy/jitviewer repo that corresponds to pypy release
* add a tag on the codespeed web site that corresponds to pypy release
-
+* revise versioning at https://readthedocs.org/projects/pypy
diff --git a/pypy/doc/release-2.4.0.rst b/pypy/doc/release-2.4.0.rst
--- a/pypy/doc/release-2.4.0.rst
+++ b/pypy/doc/release-2.4.0.rst
@@ -5,7 +5,7 @@
We're pleased to announce PyPy 2.4, which contains significant performance
enhancements and bug fixes.
-You can already download the PyPy 2.4-beta1 pre-release here:
+You can download the PyPy 2.4.0 release here:
http://pypy.org/download.html
@@ -63,6 +63,8 @@
PyPy now uses Python 2.7.8 standard library.
+We fixed a memory leak in IO in the sandbox_ code
+
We welcomed more than 12 new contributors, and conducted two Google
Summer of Code projects, as well as other student projects not
directly related to Summer of Code.
@@ -103,8 +105,9 @@
* Many issues were resolved_ since the 2.3.1 release on June 8
-.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.1.html
+.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.4.0.html
.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved
+.. _sandbox: http://doc.pypy.org/en/latest/sandbox.html
We have further improvements on the way: rpython file handling,
numpy linalg compatibility, as well
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -38,18 +38,15 @@
def cpython_code_signature(code):
"([list-of-arg-names], vararg-name-or-None, kwarg-name-or-None)."
argcount = code.co_argcount
+ varnames = code.co_varnames
assert argcount >= 0 # annotator hint
- argnames = list(code.co_varnames[:argcount])
+ argnames = list(varnames[:argcount])
if code.co_flags & CO_VARARGS:
- varargname = code.co_varnames[argcount]
+ varargname = varnames[argcount]
argcount += 1
else:
varargname = None
- if code.co_flags & CO_VARKEYWORDS:
- kwargname = code.co_varnames[argcount]
- argcount += 1
- else:
- kwargname = None
+ kwargname = varnames[argcount] if code.co_flags & CO_VARKEYWORDS else None
return Signature(argnames, varargname, kwargname)
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py
--- a/pypy/interpreter/pyparser/parsestring.py
+++ b/pypy/interpreter/pyparser/parsestring.py
@@ -83,12 +83,6 @@
v = PyString_DecodeEscape(space, substr, 'strict', enc)
return space.wrap(v)
-def hexbyte(val):
- result = "%x" % val
- if len(result) == 1:
- result = "0" + result
- return result
-
def decode_unicode_utf8(space, s, ps, q):
# ****The Python 2.7 version, producing UTF-32 escapes****
# String is utf8-encoded, but 'unicode_escape' expects
@@ -108,15 +102,14 @@
# instead.
lis.append("u005c")
if ord(s[ps]) & 0x80: # XXX inefficient
- w, ps = decode_utf8(space, s, ps, end, "utf-32-be")
- rn = len(w)
- assert rn % 4 == 0
- for i in range(0, rn, 4):
- lis.append('\\U')
- lis.append(hexbyte(ord(w[i])))
- lis.append(hexbyte(ord(w[i+1])))
- lis.append(hexbyte(ord(w[i+2])))
- lis.append(hexbyte(ord(w[i+3])))
+ w, ps = decode_utf8(space, s, ps, end)
+ for c in w:
+ # The equivalent of %08x, which is not supported by RPython.
+ # 7 zeroes are enough for the unicode range, and the
+ # result still fits in 32-bit.
+ hexa = hex(ord(c) + 0x10000000)
+ lis.append('\\U0')
+ lis.append(hexa[3:]) # Skip 0x and the leading 1
else:
lis.append(s[ps])
ps += 1
@@ -136,7 +129,7 @@
# note that the C code has a label here.
# the logic is the same.
if recode_encoding and ord(s[ps]) & 0x80:
- w, ps = decode_utf8(space, s, ps, end, recode_encoding)
+ w, ps = decode_utf8_recode(space, s, ps, end, recode_encoding)
# Append bytes to output buffer.
builder.append(w)
else:
@@ -222,14 +215,18 @@
ch >= 'A' and ch <= 'F')
-def decode_utf8(space, s, ps, end, encoding):
+def decode_utf8(space, s, ps, end):
assert ps >= 0
pt = ps
# while (s < end && *s != '\\') s++; */ /* inefficient for u".."
while ps < end and ord(s[ps]) & 0x80:
ps += 1
- w_u = space.wrap(unicodehelper.decode_utf8(space, s[pt:ps]))
- w_v = unicodehelper.encode(space, w_u, encoding)
+ u = unicodehelper.decode_utf8(space, s[pt:ps])
+ return u, ps
+
+def decode_utf8_recode(space, s, ps, end, recode_encoding):
+ u, ps = decode_utf8(space, s, ps, end)
+ w_v = unicodehelper.encode(space, space.wrap(u), recode_encoding)
v = space.str_w(w_v)
return v, ps
diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py
--- a/pypy/interpreter/pyparser/test/test_parsestring.py
+++ b/pypy/interpreter/pyparser/test/test_parsestring.py
@@ -73,11 +73,11 @@
def test_simple_enc_roundtrip(self):
space = self.space
- s = "'\x81'"
+ s = "'\x81\\t'"
s = s.decode("koi8-u").encode("utf8")
w_ret = parsestring.parsestr(self.space, 'koi8-u', s)
ret = space.unwrap(w_ret)
- assert ret == eval("# -*- coding: koi8-u -*-\n'\x81'")
+ assert ret == eval("# -*- coding: koi8-u -*-\n'\x81\\t'")
def test_multiline_unicode_strings_with_backslash(self):
space = self.space
diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -5,6 +5,7 @@
@specialize.memo()
def decode_error_handler(space):
+ # Fast version of the "strict" errors handler.
def raise_unicode_exception_decode(errors, encoding, msg, s,
startingpos, endingpos):
raise OperationError(space.w_UnicodeDecodeError,
@@ -17,6 +18,7 @@
@specialize.memo()
def encode_error_handler(space):
+ # Fast version of the "strict" errors handler.
def raise_unicode_exception_encode(errors, encoding, msg, u,
startingpos, endingpos):
raise OperationError(space.w_UnicodeEncodeError,
diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py
--- a/pypy/module/_cffi_backend/ctypefunc.py
+++ b/pypy/module/_cffi_backend/ctypefunc.py
@@ -8,6 +8,7 @@
from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P,
FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG)
from rpython.rlib.objectmodel import we_are_translated, instantiate
+from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from pypy.interpreter.error import OperationError, oefmt
@@ -160,6 +161,7 @@
raw_cdata = rffi.cast(rffi.CCHARPP, data)[0]
lltype.free(raw_cdata, flavor='raw')
lltype.free(buffer, flavor='raw')
+ keepalive_until_here(args_w)
return w_res
def get_mustfree_flag(data):
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -407,8 +407,19 @@
--------
numpy.swapaxes : equivalent function
"""
- if self.is_scalar():
+ if axis1 == axis2:
return self
+ n = len(self.get_shape())
+ if n <= 1:
+ return self
+ if axis1 < 0:
+ axis1 += n
+ if axis2 < 0:
+ axis2 += n
+ if axis1 < 0 or axis1 >= n:
+ raise oefmt(space.w_ValueError, "bad axis1 argument to swapaxes")
+ if axis2 < 0 or axis2 >= n:
+ raise oefmt(space.w_ValueError, "bad axis2 argument to swapaxes")
return self.implementation.swapaxes(space, self, axis1, axis2)
def descr_nonzero(self, space):
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -2020,6 +2020,14 @@
def test_swapaxes(self):
from numpypy import array
+ x = array([])
+ assert x.swapaxes(0, 2) is x
+ x = array([[1, 2]])
+ assert x.swapaxes(0, 0) is x
+ exc = raises(ValueError, x.swapaxes, -3, 0)
+ assert exc.value.message == "bad axis1 argument to swapaxes"
+ exc = raises(ValueError, x.swapaxes, 0, 3)
+ assert exc.value.message == "bad axis2 argument to swapaxes"
# testcases from numpy docstring
x = array([[1, 2, 3]])
assert (x.swapaxes(0, 1) == array([[1], [2], [3]])).all()
diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py
--- a/pypy/module/operator/app_operator.py
+++ b/pypy/module/operator/app_operator.py
@@ -4,7 +4,7 @@
This module exports a set of operators as functions. E.g. operator.add(x,y) is
equivalent to x+y.
'''
-from __pypy__ import builtinify
+
import types
@@ -27,7 +27,7 @@
'getslice(a, b, c) -- Same as a[b:c].'
if not isinstance(start, int) or not isinstance(end, int):
raise TypeError("an integer is expected")
- return a[start:end]
+ return a[start:end]
__getslice__ = getslice
def indexOf(a, b):
@@ -37,7 +37,7 @@
if x == b:
return index
index += 1
- raise ValueError, 'sequence.index(x): x not in sequence'
+ raise ValueError('sequence.index(x): x not in sequence')
def isMappingType(obj,):
'isMappingType(a) -- Return True if a has a mapping type, False otherwise.'
@@ -58,9 +58,9 @@
def repeat(obj, num):
'repeat(a, b) -- Return a * b, where a is a sequence, and b is an integer.'
if not isinstance(num, (int, long)):
- raise TypeError, 'an integer is required'
+ raise TypeError('an integer is required')
if not isSequenceType(obj):
- raise TypeError, "non-sequence object can't be repeated"
+ raise TypeError("non-sequence object can't be repeated")
return obj * num
@@ -68,59 +68,85 @@
def setslice(a, b, c, d):
'setslice(a, b, c, d) -- Same as a[b:c] = d.'
- a[b:c] = d
+ a[b:c] = d
__setslice__ = setslice
+def _resolve_attr_chain(chain, obj, idx=0):
+ obj = getattr(obj, chain[idx])
+ if idx + 1 == len(chain):
+ return obj
+ else:
+ return _resolve_attr_chain(chain, obj, idx + 1)
+
+
+class _simple_attrgetter(object):
+ def __init__(self, attr):
+ self._attr = attr
+
+ def __call__(self, obj):
+ return getattr(obj, self._attr)
+
+
+class _single_attrgetter(object):
+ def __init__(self, attrs):
+ self._attrs = attrs
+
+ def __call__(self, obj):
+ return _resolve_attr_chain(self._attrs, obj)
+
+
+class _multi_attrgetter(object):
+ def __init__(self, attrs):
+ self._attrs = attrs
+
+ def __call__(self, obj):
+ return tuple([
+ _resolve_attr_chain(attrs, obj)
+ for attrs in self._attrs
+ ])
+
+
def attrgetter(attr, *attrs):
+ if (
+ not isinstance(attr, basestring) or
+ not all(isinstance(a, basestring) for a in attrs)
+ ):
+ def _raise_typeerror(obj):
+ raise TypeError(
+ "argument must be a string, not %r" % type(attr).__name__
+ )
+ return _raise_typeerror
if attrs:
- getters = [single_attr_getter(a) for a in (attr,) + attrs]
- def getter(obj):
- return tuple([getter(obj) for getter in getters])
+ return _multi_attrgetter([
+ a.split(".") for a in [attr] + list(attrs)
+ ])
+ elif "." not in attr:
+ return _simple_attrgetter(attr)
else:
- getter = single_attr_getter(attr)
- return builtinify(getter)
+ return _single_attrgetter(attr.split("."))
-def single_attr_getter(attr):
- if not isinstance(attr, str):
- if not isinstance(attr, unicode):
- def _raise_typeerror(obj):
- raise TypeError("argument must be a string, not %r" %
- (type(attr).__name__,))
- return _raise_typeerror
- attr = attr.encode('ascii')
- #
- def make_getter(name, prevfn=None):
- if prevfn is None:
- def getter(obj):
- return getattr(obj, name)
+
+class itemgetter(object):
+ def __init__(self, item, *items):
+ self._single = not bool(items)
+ if self._single:
+ self._idx = item
else:
- def getter(obj):
- return getattr(prevfn(obj), name)
- return getter
- #
- last = 0
- getter = None
- while True:
- dot = attr.find(".", last)
- if dot < 0: break
- getter = make_getter(attr[last:dot], getter)
- last = dot + 1
- return make_getter(attr[last:], getter)
+ self._idx = [item] + list(items)
+ def __call__(self, obj):
+ if self._single:
+ return obj[self._idx]
+ else:
+ return tuple([obj[i] for i in self._idx])
-def itemgetter(item, *items):
- if items:
- list_of_indices = [item] + list(items)
- def getter(obj):
- return tuple([obj[i] for i in list_of_indices])
- else:
- def getter(obj):
- return obj[item]
- return builtinify(getter)
+class methodcaller(object):
+ def __init__(self, method_name, *args, **kwargs):
+ self._method_name = method_name
+ self._args = args
+ self._kwargs = kwargs
-def methodcaller(method_name, *args, **kwargs):
- def call(obj):
- return getattr(obj, method_name)(*args, **kwargs)
- return builtinify(call)
+ def __call__(self, obj):
+ return getattr(obj, self._method_name)(*self._args, **self._kwargs)
diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py
--- a/pypy/module/sys/test/test_sysmodule.py
+++ b/pypy/module/sys/test/test_sysmodule.py
@@ -91,9 +91,9 @@
assert isinstance(sys.__stderr__, file)
assert isinstance(sys.__stdin__, file)
- assert sys.__stdin__.name == "<stdin>"
- assert sys.__stdout__.name == "<stdout>"
- assert sys.__stderr__.name == "<stderr>"
+ #assert sys.__stdin__.name == "<stdin>"
+ #assert sys.__stdout__.name == "<stdout>"
+ #assert sys.__stderr__.name == "<stderr>"
if self.appdirect and not isinstance(sys.stdin, file):
return
diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py
--- a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py
@@ -16,6 +16,10 @@
if distutils.ccompiler.get_default_compiler() == 'msvc':
self.lib_m = 'msvcrt'
+ def teardown_class(self):
+ if udir.isdir():
+ udir.remove()
+
def test_locate_engine_class(self):
cls = _locate_engine_class(FFI(), self.generic)
if self.generic:
diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py
--- a/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py
@@ -73,50 +73,55 @@
assert not os.path.exists(str(SNIPPET_DIR.join(dirname, 'lextab.py')))
assert not os.path.exists(str(SNIPPET_DIR.join(dirname, 'yacctab.py')))
-def test_infrastructure():
- run_setup_and_program('infrastructure', '''
- import snip_infrastructure
- assert snip_infrastructure.func() == 42
- ''')
+class TestZIntegration(object):
+ def teardown_class(self):
+ if udir.isdir():
+ udir.remove()
-def test_distutils_module():
- run_setup_and_program("distutils_module", '''
- import snip_basic_verify
- p = snip_basic_verify.C.getpwuid(0)
- assert snip_basic_verify.ffi.string(p.pw_name) == b"root"
- ''')
+ def test_infrastructure(self):
+ run_setup_and_program('infrastructure', '''
+ import snip_infrastructure
+ assert snip_infrastructure.func() == 42
+ ''')
-def test_distutils_package_1():
- run_setup_and_program("distutils_package_1", '''
- import snip_basic_verify1
- p = snip_basic_verify1.C.getpwuid(0)
- assert snip_basic_verify1.ffi.string(p.pw_name) == b"root"
- ''')
+ def test_distutils_module(self):
+ run_setup_and_program("distutils_module", '''
+ import snip_basic_verify
+ p = snip_basic_verify.C.getpwuid(0)
+ assert snip_basic_verify.ffi.string(p.pw_name) == b"root"
+ ''')
-def test_distutils_package_2():
- run_setup_and_program("distutils_package_2", '''
- import snip_basic_verify2
- p = snip_basic_verify2.C.getpwuid(0)
- assert snip_basic_verify2.ffi.string(p.pw_name) == b"root"
- ''')
+ def test_distutils_package_1(self):
+ run_setup_and_program("distutils_package_1", '''
+ import snip_basic_verify1
+ p = snip_basic_verify1.C.getpwuid(0)
+ assert snip_basic_verify1.ffi.string(p.pw_name) == b"root"
+ ''')
-def test_setuptools_module():
- run_setup_and_program("setuptools_module", '''
- import snip_setuptools_verify
- p = snip_setuptools_verify.C.getpwuid(0)
- assert snip_setuptools_verify.ffi.string(p.pw_name) == b"root"
- ''')
+ def test_distutils_package_2(self):
+ run_setup_and_program("distutils_package_2", '''
+ import snip_basic_verify2
+ p = snip_basic_verify2.C.getpwuid(0)
+ assert snip_basic_verify2.ffi.string(p.pw_name) == b"root"
+ ''')
-def test_setuptools_package_1():
- run_setup_and_program("setuptools_package_1", '''
- import snip_setuptools_verify1
- p = snip_setuptools_verify1.C.getpwuid(0)
- assert snip_setuptools_verify1.ffi.string(p.pw_name) == b"root"
- ''')
+ def test_setuptools_module(self):
+ run_setup_and_program("setuptools_module", '''
+ import snip_setuptools_verify
+ p = snip_setuptools_verify.C.getpwuid(0)
+ assert snip_setuptools_verify.ffi.string(p.pw_name) == b"root"
+ ''')
-def test_setuptools_package_2():
- run_setup_and_program("setuptools_package_2", '''
- import snip_setuptools_verify2
- p = snip_setuptools_verify2.C.getpwuid(0)
- assert snip_setuptools_verify2.ffi.string(p.pw_name) == b"root"
- ''')
+ def test_setuptools_package_1(self):
+ run_setup_and_program("setuptools_package_1", '''
+ import snip_setuptools_verify1
+ p = snip_setuptools_verify1.C.getpwuid(0)
+ assert snip_setuptools_verify1.ffi.string(p.pw_name) == b"root"
+ ''')
+
+ def test_setuptools_package_2(self):
+ run_setup_and_program("setuptools_package_2", '''
+ import snip_setuptools_verify2
+ p = snip_setuptools_verify2.C.getpwuid(0)
+ assert snip_setuptools_verify2.ffi.string(p.pw_name) == b"root"
+ ''')
diff --git a/pypy/module/test_lib_pypy/test_datetime.py b/pypy/module/test_lib_pypy/test_datetime.py
--- a/pypy/module/test_lib_pypy/test_datetime.py
+++ b/pypy/module/test_lib_pypy/test_datetime.py
@@ -1,193 +1,221 @@
"""Additional tests for datetime."""
from __future__ import absolute_import
-from lib_pypy import datetime
import py
-def test_repr():
- print datetime
- expected = "datetime.datetime(1, 2, 3, 0, 0)"
- assert repr(datetime.datetime(1,2,3)) == expected
+class BaseTestDatetime:
+ def test_repr(self):
+ print datetime
+ expected = "datetime.datetime(1, 2, 3, 0, 0)"
+ assert repr(datetime.datetime(1,2,3)) == expected
-def test_attributes():
- for x in [datetime.date.today(),
- datetime.time(),
- datetime.datetime.utcnow(),
- datetime.timedelta(),
- datetime.tzinfo()]:
- raises(AttributeError, 'x.abc = 1')
+ def test_attributes(self):
+ for x in [datetime.date.today(),
+ datetime.time(),
+ datetime.datetime.utcnow(),
+ datetime.timedelta(),
+ datetime.tzinfo()]:
+ raises(AttributeError, 'x.abc = 1')
-def test_timedelta_init_long():
- td = datetime.timedelta(microseconds=20000000000000000000)
- assert td.days == 231481481
- assert td.seconds == 41600
- td = datetime.timedelta(microseconds=20000000000000000000.)
- assert td.days == 231481481
- assert td.seconds == 41600
+ def test_timedelta_init_long(self):
+ td = datetime.timedelta(microseconds=20000000000000000000)
+ assert td.days == 231481481
+ assert td.seconds == 41600
+ td = datetime.timedelta(microseconds=20000000000000000000.)
+ assert td.days == 231481481
+ assert td.seconds == 41600
-def test_unpickle():
- e = raises(TypeError, datetime.date, '123')
- assert e.value.args[0] == 'an integer is required'
- e = raises(TypeError, datetime.time, '123')
- assert e.value.args[0] == 'an integer is required'
- e = raises(TypeError, datetime.datetime, '123')
- assert e.value.args[0] == 'an integer is required'
+ def test_unpickle(self):
+ e = raises(TypeError, datetime.date, '123')
+ assert e.value.args[0] == 'an integer is required'
+ e = raises(TypeError, datetime.time, '123')
+ assert e.value.args[0] == 'an integer is required'
+ e = raises(TypeError, datetime.datetime, '123')
+ assert e.value.args[0] == 'an integer is required'
- datetime.time('\x01' * 6, None)
- with raises(TypeError) as e:
- datetime.time('\x01' * 6, 123)
- assert str(e.value) == "bad tzinfo state arg"
+ datetime.time('\x01' * 6, None)
+ with raises(TypeError) as e:
+ datetime.time('\x01' * 6, 123)
+ assert str(e.value) == "bad tzinfo state arg"
- datetime.datetime('\x01' * 10, None)
- with raises(TypeError) as e:
- datetime.datetime('\x01' * 10, 123)
- assert str(e.value) == "bad tzinfo state arg"
+ datetime.datetime('\x01' * 10, None)
+ with raises(TypeError) as e:
+ datetime.datetime('\x01' * 10, 123)
+ assert str(e.value) == "bad tzinfo state arg"
-def test_strptime():
- import time, sys
- if sys.version_info < (2, 6):
- py.test.skip("needs the _strptime module")
+ def test_strptime(self):
+ import time, sys
+ if sys.version_info < (2, 6):
+ py.test.skip("needs the _strptime module")
- string = '2004-12-01 13:02:47'
- format = '%Y-%m-%d %H:%M:%S'
- expected = datetime.datetime(*(time.strptime(string, format)[0:6]))
- got = datetime.datetime.strptime(string, format)
- assert expected == got
+ string = '2004-12-01 13:02:47'
+ format = '%Y-%m-%d %H:%M:%S'
+ expected = datetime.datetime(*(time.strptime(string, format)[0:6]))
+ got = datetime.datetime.strptime(string, format)
+ assert expected == got
-def test_datetime_rounding():
- b = 0.0000001
- a = 0.9999994
+ def test_datetime_rounding(self):
+ b = 0.0000001
+ a = 0.9999994
- assert datetime.datetime.utcfromtimestamp(a).microsecond == 999999
- assert datetime.datetime.utcfromtimestamp(a).second == 0
- a += b
- assert datetime.datetime.utcfromtimestamp(a).microsecond == 999999
- assert datetime.datetime.utcfromtimestamp(a).second == 0
- a += b
- assert datetime.datetime.utcfromtimestamp(a).microsecond == 0
- assert datetime.datetime.utcfromtimestamp(a).second == 1
+ assert datetime.datetime.utcfromtimestamp(a).microsecond == 999999
+ assert datetime.datetime.utcfromtimestamp(a).second == 0
+ a += b
+ assert datetime.datetime.utcfromtimestamp(a).microsecond == 999999
+ assert datetime.datetime.utcfromtimestamp(a).second == 0
+ a += b
+ assert datetime.datetime.utcfromtimestamp(a).microsecond == 0
+ assert datetime.datetime.utcfromtimestamp(a).second == 1
-def test_more_datetime_rounding():
- # this test verified on top of CPython 2.7 (using a plain
- # "import datetime" above)
- expected_results = {
- -1000.0: 'datetime.datetime(1969, 12, 31, 23, 43, 20)',
- -999.9999996: 'datetime.datetime(1969, 12, 31, 23, 43, 20)',
- -999.4: 'datetime.datetime(1969, 12, 31, 23, 43, 20, 600000)',
- -999.0000004: 'datetime.datetime(1969, 12, 31, 23, 43, 21)',
- -1.0: 'datetime.datetime(1969, 12, 31, 23, 59, 59)',
- -0.9999996: 'datetime.datetime(1969, 12, 31, 23, 59, 59)',
- -0.4: 'datetime.datetime(1969, 12, 31, 23, 59, 59, 600000)',
- -0.0000004: 'datetime.datetime(1970, 1, 1, 0, 0)',
- 0.0: 'datetime.datetime(1970, 1, 1, 0, 0)',
- 0.0000004: 'datetime.datetime(1970, 1, 1, 0, 0)',
- 0.4: 'datetime.datetime(1970, 1, 1, 0, 0, 0, 400000)',
- 0.9999996: 'datetime.datetime(1970, 1, 1, 0, 0, 1)',
- 1000.0: 'datetime.datetime(1970, 1, 1, 0, 16, 40)',
- 1000.0000004: 'datetime.datetime(1970, 1, 1, 0, 16, 40)',
- 1000.4: 'datetime.datetime(1970, 1, 1, 0, 16, 40, 400000)',
- 1000.9999996: 'datetime.datetime(1970, 1, 1, 0, 16, 41)',
- 1293843661.191: 'datetime.datetime(2011, 1, 1, 1, 1, 1, 191000)',
- }
- for t in sorted(expected_results):
- dt = datetime.datetime.utcfromtimestamp(t)
- assert repr(dt) == expected_results[t]
+ def test_more_datetime_rounding(self):
+ # this test verified on top of CPython 2.7 (using a plain
+ # "import datetime" above)
+ expected_results = {
+ -1000.0: 'datetime.datetime(1969, 12, 31, 23, 43, 20)',
+ -999.9999996: 'datetime.datetime(1969, 12, 31, 23, 43, 20)',
+ -999.4: 'datetime.datetime(1969, 12, 31, 23, 43, 20, 600000)',
+ -999.0000004: 'datetime.datetime(1969, 12, 31, 23, 43, 21)',
+ -1.0: 'datetime.datetime(1969, 12, 31, 23, 59, 59)',
+ -0.9999996: 'datetime.datetime(1969, 12, 31, 23, 59, 59)',
+ -0.4: 'datetime.datetime(1969, 12, 31, 23, 59, 59, 600000)',
+ -0.0000004: 'datetime.datetime(1970, 1, 1, 0, 0)',
+ 0.0: 'datetime.datetime(1970, 1, 1, 0, 0)',
+ 0.0000004: 'datetime.datetime(1970, 1, 1, 0, 0)',
+ 0.4: 'datetime.datetime(1970, 1, 1, 0, 0, 0, 400000)',
+ 0.9999996: 'datetime.datetime(1970, 1, 1, 0, 0, 1)',
+ 1000.0: 'datetime.datetime(1970, 1, 1, 0, 16, 40)',
+ 1000.0000004: 'datetime.datetime(1970, 1, 1, 0, 16, 40)',
+ 1000.4: 'datetime.datetime(1970, 1, 1, 0, 16, 40, 400000)',
+ 1000.9999996: 'datetime.datetime(1970, 1, 1, 0, 16, 41)',
+ 1293843661.191: 'datetime.datetime(2011, 1, 1, 1, 1, 1, 191000)',
+ }
+ for t in sorted(expected_results):
+ dt = datetime.datetime.utcfromtimestamp(t)
+ assert repr(dt) == expected_results[t]
-def test_utcfromtimestamp():
- """Confirm that utcfromtimestamp and fromtimestamp give consistent results.
+ def test_utcfromtimestamp(self):
+ """Confirm that utcfromtimestamp and fromtimestamp give consistent results.
- Based on danchr's test script in https://bugs.pypy.org/issue986
- """
- import os
- import time
- if os.name == 'nt':
- skip("setting os.environ['TZ'] ineffective on windows")
- try:
- prev_tz = os.environ.get("TZ")
- os.environ["TZ"] = "GMT"
- time.tzset()
- for unused in xrange(100):
- now = time.time()
- delta = (datetime.datetime.utcfromtimestamp(now) -
- datetime.datetime.fromtimestamp(now))
- assert delta.days * 86400 + delta.seconds == 0
- finally:
- if prev_tz is None:
- del os.environ["TZ"]
- else:
- os.environ["TZ"] = prev_tz
- time.tzset()
+ Based on danchr's test script in https://bugs.pypy.org/issue986
+ """
+ import os
+ import time
+ if os.name == 'nt':
+ skip("setting os.environ['TZ'] ineffective on windows")
+ try:
+ prev_tz = os.environ.get("TZ")
+ os.environ["TZ"] = "GMT"
+ time.tzset()
+ for unused in xrange(100):
+ now = time.time()
+ delta = (datetime.datetime.utcfromtimestamp(now) -
+ datetime.datetime.fromtimestamp(now))
+ assert delta.days * 86400 + delta.seconds == 0
+ finally:
+ if prev_tz is None:
+ del os.environ["TZ"]
+ else:
+ os.environ["TZ"] = prev_tz
+ time.tzset()
-def test_utcfromtimestamp_microsecond():
- dt = datetime.datetime.utcfromtimestamp(0)
- assert isinstance(dt.microsecond, int)
+ def test_utcfromtimestamp_microsecond(self):
+ dt = datetime.datetime.utcfromtimestamp(0)
+ assert isinstance(dt.microsecond, int)
-def test_default_args():
- with py.test.raises(TypeError):
- datetime.datetime()
- with py.test.raises(TypeError):
- datetime.datetime(10)
- with py.test.raises(TypeError):
- datetime.datetime(10, 10)
- datetime.datetime(10, 10, 10)
+ def test_default_args(self):
+ with py.test.raises(TypeError):
+ datetime.datetime()
+ with py.test.raises(TypeError):
+ datetime.datetime(10)
+ with py.test.raises(TypeError):
+ datetime.datetime(10, 10)
+ datetime.datetime(10, 10, 10)
-def test_check_arg_types():
- import decimal
- class Number:
- def __init__(self, value):
- self.value = value
- def __int__(self):
- return self.value
+ def test_check_arg_types(self):
+ import decimal
+ class Number:
+ def __init__(self, value):
+ self.value = value
+ def __int__(self):
+ return self.value
- for xx in [10L,
- decimal.Decimal(10),
- decimal.Decimal('10.9'),
- Number(10),
- Number(10L)]:
- assert datetime.datetime(10, 10, 10, 10, 10, 10, 10) == \
- datetime.datetime(xx, xx, xx, xx, xx, xx, xx)
+ for xx in [10L,
+ decimal.Decimal(10),
+ decimal.Decimal('10.9'),
+ Number(10),
+ Number(10L)]:
+ assert datetime.datetime(10, 10, 10, 10, 10, 10, 10) == \
+ datetime.datetime(xx, xx, xx, xx, xx, xx, xx)
- with py.test.raises(TypeError) as e:
- datetime.datetime(10, 10, '10')
- assert str(e.value) == 'an integer is required'
+ with py.test.raises(TypeError) as e:
+ datetime.datetime(10, 10, '10')
+ assert str(e.value) == 'an integer is required'
- f10 = Number(10.9)
- with py.test.raises(TypeError) as e:
- datetime.datetime(10, 10, f10)
- assert str(e.value) == '__int__ method should return an integer'
+ f10 = Number(10.9)
+ with py.test.raises(TypeError) as e:
+ datetime.datetime(10, 10, f10)
+ assert str(e.value) == '__int__ method should return an integer'
- class Float(float):
- pass
- s10 = Float(10.9)
- with py.test.raises(TypeError) as e:
- datetime.datetime(10, 10, s10)
- assert str(e.value) == 'integer argument expected, got float'
+ class Float(float):
+ pass
+ s10 = Float(10.9)
+ with py.test.raises(TypeError) as e:
+ datetime.datetime(10, 10, s10)
+ assert str(e.value) == 'integer argument expected, got float'
- with py.test.raises(TypeError):
- datetime.datetime(10., 10, 10)
- with py.test.raises(TypeError):
- datetime.datetime(10, 10., 10)
- with py.test.raises(TypeError):
- datetime.datetime(10, 10, 10.)
- with py.test.raises(TypeError):
- datetime.datetime(10, 10, 10, 10.)
- with py.test.raises(TypeError):
- datetime.datetime(10, 10, 10, 10, 10.)
- with py.test.raises(TypeError):
- datetime.datetime(10, 10, 10, 10, 10, 10.)
- with py.test.raises(TypeError):
- datetime.datetime(10, 10, 10, 10, 10, 10, 10.)
+ with py.test.raises(TypeError):
+ datetime.datetime(10., 10, 10)
+ with py.test.raises(TypeError):
+ datetime.datetime(10, 10., 10)
+ with py.test.raises(TypeError):
+ datetime.datetime(10, 10, 10.)
+ with py.test.raises(TypeError):
+ datetime.datetime(10, 10, 10, 10.)
+ with py.test.raises(TypeError):
+ datetime.datetime(10, 10, 10, 10, 10.)
+ with py.test.raises(TypeError):
+ datetime.datetime(10, 10, 10, 10, 10, 10.)
+ with py.test.raises(TypeError):
+ datetime.datetime(10, 10, 10, 10, 10, 10, 10.)
-def test_utcnow_microsecond():
- import copy
+ def test_utcnow_microsecond(self):
+ import copy
- dt = datetime.datetime.utcnow()
- assert type(dt.microsecond) is int
+ dt = datetime.datetime.utcnow()
+ assert type(dt.microsecond) is int
- copy.copy(dt)
+ copy.copy(dt)
-def test_radd():
- class X(object):
- def __radd__(self, other):
- return "radd"
- assert datetime.date(10, 10, 10) + X() == "radd"
+ def test_radd(self):
+ class X(object):
+ def __radd__(self, other):
+ return "radd"
+ assert datetime.date(10, 10, 10) + X() == "radd"
+
+ def test_raises_if_passed_naive_datetime_and_start_or_end_time_defined(self):
+ class Foo(datetime.tzinfo):
+ def utcoffset(self, dt):
+ return datetime.timedelta(0.1)
+ naive = datetime.datetime(2014, 9, 22)
+ aware = datetime.datetime(2014, 9, 22, tzinfo=Foo())
+ with py.test.raises(TypeError) as e:
+ naive == aware
+ assert str(e.value) == "can't compare offset-naive and offset-aware datetimes"
+ with py.test.raises(TypeError) as e:
+ naive - aware
+ assert str(e.value) == "can't subtract offset-naive and offset-aware datetimes"
+ naive = datetime.time(7, 32, 12)
+ aware = datetime.time(7, 32, 12, tzinfo=Foo())
+ with py.test.raises(TypeError) as e:
+ naive == aware
+ assert str(e.value) == "can't compare offset-naive and offset-aware times"
+
+class TestDatetimeCPython(BaseTestDatetime):
+ def setup_class(cls):
+ global datetime
+ import datetime
+
+class TestDatetimePyPy(BaseTestDatetime):
+ def setup_class(cls):
+ global datetime
+ from lib_pypy import datetime
diff --git a/pypy/sandbox/pypy_interact.py b/pypy/sandbox/pypy_interact.py
--- a/pypy/sandbox/pypy_interact.py
+++ b/pypy/sandbox/pypy_interact.py
@@ -55,7 +55,7 @@
return Dir({
'bin': Dir({
- 'pypy-c': RealFile(self.executable),
+ 'pypy-c': RealFile(self.executable, mode=0111),
'lib-python': RealDir(os.path.join(libroot, 'lib-python'),
exclude=exclude),
'lib_pypy': RealDir(os.path.join(libroot, 'lib_pypy'),
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py
--- a/pypy/tool/release/package.py
+++ b/pypy/tool/release/package.py
@@ -297,7 +297,13 @@
argparse = imp.load_source('argparse', 'lib-python/2.7/argparse.py')
if sys.platform == 'win32':
pypy_exe = 'pypy.exe'
- license_base = os.path.join(basedir, r'..\..\..\local') # as on buildbot YMMV
+ for p in [os.path.join(basedir, r'..\..\..\local'), #buildbot
+ os.path.join(basedir, r'..\local')]: # pypy/doc/windows.rst
+ if os.path.exists(p):
+ license_base = p
+ break
+ else:
+ license_base = 'unkown'
else:
pypy_exe = 'pypy'
license_base = '/usr/share/doc'
@@ -370,5 +376,21 @@
if __name__ == '__main__':
import sys
+ if sys.platform == 'win32':
+ # Try to avoid opening a dialog box if one of the
+ # subprocesses causes a system error
+ import ctypes
+ winapi = ctypes.windll.kernel32
+ SetErrorMode = winapi.SetErrorMode
+ SetErrorMode.argtypes=[ctypes.c_int]
+
+ SEM_FAILCRITICALERRORS = 1
+ SEM_NOGPFAULTERRORBOX = 2
+ SEM_NOOPENFILEERRORBOX = 0x8000
+ flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX
+ #Since there is no GetErrorMode, do a double Set
+ old_mode = SetErrorMode(flags)
+ SetErrorMode(old_mode | flags)
+
retval, _ = package(*sys.argv[1:])
sys.exit(retval)
diff --git a/pypy/tool/release/test/test_package.py b/pypy/tool/release/test/test_package.py
--- a/pypy/tool/release/test/test_package.py
+++ b/pypy/tool/release/test/test_package.py
@@ -115,15 +115,21 @@
check(pypy, 0755)
def test_generate_license():
- from os.path import dirname, abspath, join
+ from os.path import dirname, abspath, join, exists
class Options(object):
pass
options = Options()
basedir = dirname(dirname(dirname(dirname(dirname(abspath(__file__))))))
options.no_tk = False
if sys.platform == 'win32':
- # as on buildbot YMMV
- options.license_base = join(basedir, r'..\..\..\local')
+ for p in [join(basedir, r'..\..\..\local'), #buildbot
+ join(basedir, r'..\local')]: # pypy/doc/windows.rst
+ if exists(p):
+ license_base = p
+ break
+ else:
+ license_base = 'unkown'
+ options.license_base = license_base
else:
options.license_base = '/usr/share/doc'
license = package.generate_license(py.path.local(basedir), options)
diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py
--- a/rpython/jit/backend/arm/callbuilder.py
+++ b/rpython/jit/backend/arm/callbuilder.py
@@ -92,7 +92,8 @@
self.mc.LDR_ri(r.r7.value, r.r5.value)
# change 'rpy_fastgil' to 0 (it should be non-zero right now)
- self.mc.DMB()
+ if self.asm.cpu.cpuinfo.arch_version >= 7:
+ self.mc.DMB()
self.mc.gen_load_int(r.r6.value, fastgil)
self.mc.MOV_ri(r.ip.value, 0)
self.mc.STR_ri(r.ip.value, r.r6.value)
@@ -112,7 +113,8 @@
self.mc.STREX(r.r3.value, r.ip.value, r.r6.value, c=c.EQ)
# try to claim the lock
self.mc.CMP_ri(r.r3.value, 0, cond=c.EQ) # did this succeed?
- self.mc.DMB()
+ if self.asm.cpu.cpuinfo.arch_version >= 7:
+ self.mc.DMB()
# the success of the lock acquisition is defined by
# 'EQ is true', or equivalently by 'r3 == 0'.
#
diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py
--- a/rpython/jit/backend/arm/codebuilder.py
+++ b/rpython/jit/backend/arm/codebuilder.py
@@ -333,6 +333,8 @@
| (rn & 0xF) << 16)
def DMB(self):
+ # ARMv7 only. I guess ARMv6 CPUs cannot be used in symmetric
+ # multi-processing at all? That would make this instruction unneeded.
# note: 'cond' is only permitted on Thumb here, but don't
# write literally 0xf57ff05f, because it's larger than 31 bits
c = cond.AL
diff --git a/rpython/jit/backend/arm/instructions.py b/rpython/jit/backend/arm/instructions.py
--- a/rpython/jit/backend/arm/instructions.py
+++ b/rpython/jit/backend/arm/instructions.py
@@ -142,6 +142,7 @@
#'VCVT' : {'opc1':0xB, 'opc2':0xE, 'opc3':0x1, 'base': False},
}
+# ARMv7 only
simd_instructions_3regs = {
'VADD_i64': {'A': 0x8, 'B': 0, 'U': 0},
'VSUB_i64': {'A': 0x8, 'B': 0, 'U': 1},
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
--- a/rpython/jit/backend/llsupport/rewrite.py
+++ b/rpython/jit/backend/llsupport/rewrite.py
@@ -1,3 +1,4 @@
+from rpython.rlib import rgc
from rpython.rlib.rarithmetic import ovfcheck
from rpython.rtyper.lltypesystem import llmemory
from rpython.jit.metainterp import history
@@ -390,8 +391,8 @@
val = op.getarg(0)
if val not in self.write_barrier_applied:
v = op.getarg(1)
- if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
- bool(v.value)): # store a non-NULL
+ if (isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
+ rgc.needs_write_barrier(v.value))):
self.gen_write_barrier(val)
#op = op.copy_and_change(rop.SETFIELD_RAW)
self.newops.append(op)
@@ -400,8 +401,8 @@
val = op.getarg(0)
if val not in self.write_barrier_applied:
v = op.getarg(2)
- if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
- bool(v.value)): # store a non-NULL
+ if (isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
+ rgc.needs_write_barrier(v.value))):
self.gen_write_barrier_array(val, op.getarg(1))
#op = op.copy_and_change(rop.SET{ARRAYITEM,INTERIORFIELD}_RAW)
self.newops.append(op)
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -86,6 +86,14 @@
collect(i)
i += 1
+def needs_write_barrier(obj):
+ """ We need to emit write barrier if the right hand of assignment
+ is in nursery, used by the JIT for handling set*_gc(Const)
+ """
+ if not obj:
+ return False
+ return can_move(obj)
+
def _heap_stats():
raise NotImplementedError # can't be run directly
diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py
--- a/rpython/rtyper/lltypesystem/lltype.py
+++ b/rpython/rtyper/lltypesystem/lltype.py
@@ -1,16 +1,17 @@
-from types import NoneType, MethodType
import weakref
+from types import MethodType, NoneType
+
+from rpython.annotator.bookkeeper import analyzer_for, immutablevalue
from rpython.annotator.model import (
- SomeInteger, SomeBool, SomeObject, AnnotatorError)
+ AnnotatorError, SomeBool, SomeInteger, SomeObject)
+from rpython.rlib.objectmodel import Symbolic
from rpython.rlib.rarithmetic import (
- r_int, r_uint, intmask, r_singlefloat, r_ulonglong, r_longlong,
- r_longfloat, r_longlonglong, base_int, normalizedinttype, longlongmask,
- longlonglongmask, maxint, is_valid_int, is_emulated_long)
-from rpython.rlib.objectmodel import Symbolic
+ base_int, intmask, is_emulated_long, is_valid_int, longlonglongmask,
+ longlongmask, maxint, normalizedinttype, r_int, r_longfloat, r_longlong,
+ r_longlonglong, r_singlefloat, r_uint, r_ulonglong)
+from rpython.rtyper.extregistry import ExtRegistryEntry
+from rpython.tool import leakfinder
from rpython.tool.identity_dict import identity_dict
-from rpython.tool import leakfinder
-from rpython.annotator.bookkeeper import analyzer_for, immutablevalue
-from rpython.rtyper.extregistry import ExtRegistryEntry
class State(object):
pass
@@ -313,14 +314,12 @@
except KeyError:
return ContainerType.__getattr__(self, name)
-
def _nofield(self, name):
raise AttributeError('struct %s has no field %r' % (self._name,
name))
def _names_without_voids(self):
- names_without_voids = [name for name in self._names if self._flds[name] is not Void]
- return names_without_voids
+ return [name for name in self._names if self._flds[name] is not Void]
def _str_fields_without_voids(self):
return ', '.join(['%s: %s' % (name, self._flds[name])
@@ -576,8 +575,10 @@
_gckind = 'raw'
def __init__(self, tag, hints={}):
- """ if hints['render_structure'] is set, the type is internal and not considered
- to come from somewhere else (it should be rendered as a structure) """
+ """If hints['render_structure'] is set, the type is internal and
+ not considered to come from somewhere else (it should be
+ rendered as a structure)
+ """
self.tag = tag
self.__name__ = tag
self.hints = frozendict(hints)
@@ -675,7 +676,8 @@
_numbertypes = {int: Number("Signed", int, intmask)}
_numbertypes[r_int] = _numbertypes[int]
-_numbertypes[r_longlonglong] = Number("SignedLongLongLong", r_longlonglong, longlonglongmask)
+_numbertypes[r_longlonglong] = Number("SignedLongLongLong", r_longlonglong,
+ longlonglongmask)
if r_longlong is not r_int:
_numbertypes[r_longlong] = Number("SignedLongLong", r_longlong,
longlongmask)
@@ -702,8 +704,8 @@
UnsignedLongLong = build_number("UnsignedLongLong", r_ulonglong)
Float = Primitive("Float", 0.0) # C type 'double'
-SingleFloat = Primitive("SingleFloat", r_singlefloat(0.0)) # C type 'float'
-LongFloat = Primitive("LongFloat", r_longfloat(0.0)) # C type 'long double'
+SingleFloat = Primitive("SingleFloat", r_singlefloat(0.0)) # 'float'
+LongFloat = Primitive("LongFloat", r_longfloat(0.0)) # 'long double'
r_singlefloat._TYPE = SingleFloat
Char = Primitive("Char", '\x00')
@@ -876,9 +878,11 @@
@analyzer_for(cast_primitive)
def ann_cast_primitive(T, s_v):
- from rpython.rtyper.llannotation import annotation_to_lltype, ll_to_annotation
+ from rpython.rtyper.llannotation import (
+ annotation_to_lltype, ll_to_annotation)
assert T.is_constant()
- return ll_to_annotation(cast_primitive(T.const, annotation_to_lltype(s_v)._defl()))
+ return ll_to_annotation(cast_primitive(T.const,
+ annotation_to_lltype(s_v)._defl()))
def _cast_whatever(TGT, value):
@@ -905,7 +909,8 @@
elif TGT == llmemory.Address and isinstance(ORIG, Ptr):
return llmemory.cast_ptr_to_adr(value)
elif TGT == Signed and isinstance(ORIG, Ptr) and ORIG.TO._gckind == 'raw':
- return llmemory.cast_adr_to_int(llmemory.cast_ptr_to_adr(value), 'symbolic')
+ return llmemory.cast_adr_to_int(llmemory.cast_ptr_to_adr(value),
+ 'symbolic')
raise TypeError("don't know how to cast from %r to %r" % (ORIG, TGT))
@@ -1176,8 +1181,8 @@
except DelayedPointer:
return True # assume it's not a delayed null
- # _setobj, _getobj and _obj0 are really _internal_ implementations details of _ptr,
- # use _obj if necessary instead !
+ # _setobj, _getobj and _obj0 are really _internal_ implementations
+ # details of _ptr, use _obj if necessary instead !
def _setobj(self, pointing_to, solid=False):
if pointing_to is None:
obj0 = None
@@ -1244,12 +1249,12 @@
if T1 == T2:
setattr(self._obj, field_name, val)
else:
- raise TypeError("%r instance field %r:\n"
- "expects %r\n"
- " got %r" % (self._T, field_name, T1, T2))
+ raise TypeError(
+ "%r instance field %r:\nexpects %r\n got %r" %
+ (self._T, field_name, T1, T2))
return
- raise AttributeError("%r instance has no field %r" % (self._T,
- field_name))
+ raise AttributeError("%r instance has no field %r" %
+ (self._T, field_name))
def __getitem__(self, i): # ! can only return basic or ptr !
if isinstance(self._T, (Array, FixedSizeArray)):
@@ -1266,7 +1271,8 @@
if isinstance(self._T, (Array, FixedSizeArray)):
T1 = self._T.OF
if isinstance(T1, ContainerType):
- raise TypeError("cannot directly assign to container array items")
+ raise TypeError("cannot directly assign to container array "
+ "items")
T2 = typeOf(val)
if T2 != T1:
from rpython.rtyper.lltypesystem import rffi
@@ -1316,7 +1322,8 @@
from rpython.rtyper.lltypesystem import rffi
if isinstance(self._T, FuncType):
if len(args) != len(self._T.ARGS):
- raise TypeError("calling %r with wrong argument number: %r" % (self._T, args))
+ raise TypeError("calling %r with wrong argument number: %r" %
+ (self._T, args))
for i, a, ARG in zip(range(len(self._T.ARGS)), args, self._T.ARGS):
if typeOf(a) != ARG:
# ARG could be Void
@@ -1415,11 +1422,13 @@
raise RuntimeError("widening to trash: %r" % self)
PARENTTYPE = struc._parent_type
if getattr(parent, PARENTTYPE._names[0]) != struc:
- raise InvalidCast(CURTYPE, PTRTYPE) # xxx different exception perhaps?
+ # xxx different exception perhaps?
+ raise InvalidCast(CURTYPE, PTRTYPE)
struc = parent
u -= 1
if PARENTTYPE != PTRTYPE.TO:
- raise RuntimeError("widening %r inside %r instead of %r" % (CURTYPE, PARENTTYPE, PTRTYPE.TO))
+ raise RuntimeError("widening %r inside %r instead of %r" %
+ (CURTYPE, PARENTTYPE, PTRTYPE.TO))
return _ptr(PTRTYPE, struc, solid=self._solid)
def _cast_to_int(self, check=True):
@@ -1430,7 +1439,9 @@
return obj # special case for cast_int_to_ptr() results
obj = normalizeptr(self, check)._getobj(check)
if isinstance(obj, int):
- return obj # special case for cast_int_to_ptr() results put into opaques
+ # special case for cast_int_to_ptr() results put into
+ # opaques
+ return obj
if getattr(obj, '_read_directly_intval', False):
return obj.intval # special case for _llgcopaque
result = intmask(obj._getid())
@@ -1468,7 +1479,8 @@
"""XXX A nice docstring here"""
T = typeOf(val)
if isinstance(T, ContainerType):
- if self._T._gckind == 'gc' and T._gckind == 'raw' and not isinstance(T, OpaqueType):
+ if (self._T._gckind == 'gc' and T._gckind == 'raw' and
+ not isinstance(T, OpaqueType)):
val = _interior_ptr(T, self._obj, [offset])
else:
val = _ptr(Ptr(T), val, solid=self._solid)
@@ -1531,12 +1543,14 @@
setattr(example, s_attr.const, v_lltype._defl())
def call(self, args):
- from rpython.rtyper.llannotation import annotation_to_lltype, ll_to_annotation
+ from rpython.rtyper.llannotation import (
+ annotation_to_lltype, ll_to_annotation)
args_s, kwds_s = args.unpack()
if kwds_s:
raise Exception("keyword arguments to call to a low-level fn ptr")
info = 'argument to ll function pointer call'
- llargs = [annotation_to_lltype(s_arg, info)._defl() for s_arg in args_s]
+ llargs = [annotation_to_lltype(s_arg, info)._defl()
+ for s_arg in args_s]
v = self.ll_ptrtype._example()(*llargs)
return ll_to_annotation(v)
@@ -1593,7 +1607,6 @@
return val
-
assert not '__dict__' in dir(_interior_ptr)
class _container(object):
@@ -1721,11 +1734,13 @@
__slots__ = ('_hash_cache_', '_compilation_info')
- def __new__(self, TYPE, n=None, initialization=None, parent=None, parentindex=None):
+ def __new__(self, TYPE, n=None, initialization=None, parent=None,
+ parentindex=None):
my_variety = _struct_variety(TYPE._names)
return object.__new__(my_variety)
- def __init__(self, TYPE, n=None, initialization=None, parent=None, parentindex=None):
+ def __init__(self, TYPE, n=None, initialization=None, parent=None,
+ parentindex=None):
_parentable.__init__(self, TYPE)
if n is not None and TYPE._arrayfld is None:
raise TypeError("%r is not variable-sized" % (TYPE,))
@@ -1734,9 +1749,11 @@
first, FIRSTTYPE = TYPE._first_struct()
for fld, typ in TYPE._flds.items():
if fld == TYPE._arrayfld:
- value = _array(typ, n, initialization=initialization, parent=self, parentindex=fld)
+ value = _array(typ, n, initialization=initialization,
+ parent=self, parentindex=fld)
else:
- value = typ._allocate(initialization=initialization, parent=self, parentindex=fld)
+ value = typ._allocate(initialization=initialization,
+ parent=self, parentindex=fld)
setattr(self, fld, value)
if parent is not None:
self._setparentstructure(parent, parentindex)
@@ -1795,7 +1812,8 @@
__slots__ = ('items',)
- def __init__(self, TYPE, n, initialization=None, parent=None, parentindex=None):
+ def __init__(self, TYPE, n, initialization=None, parent=None,
+ parentindex=None):
if not is_valid_int(n):
raise TypeError("array length must be an int")
if n < 0:
@@ -1964,7 +1982,8 @@
if not key._was_freed():
newcache[key] = value
except RuntimeError:
- pass # ignore "accessing subxxx, but already gc-ed parent"
+ # ignore "accessing subxxx, but already gc-ed parent"
+ pass
if newcache:
_subarray._cache[T] = newcache
else:
@@ -2020,8 +2039,10 @@
attrs.setdefault('_name', '?')
attrs.setdefault('_callable', None)
self.__dict__.update(attrs)
- if '_callable' in attrs and hasattr(attrs['_callable'], '_compilation_info'):
- self.__dict__['compilation_info'] = attrs['_callable']._compilation_info
+ if '_callable' in attrs and hasattr(attrs['_callable'],
+ '_compilation_info'):
+ self.__dict__['compilation_info'] = \
+ attrs['_callable']._compilation_info
def __repr__(self):
return '<%s>' % (self,)
@@ -2126,8 +2147,8 @@
return _ptr(Ptr(T), o, solid)
@analyzer_for(malloc)
-def ann_malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None,
- s_add_memory_pressure=None):
+def ann_malloc(s_T, s_n=None, s_flavor=None, s_zero=None,
+ s_track_allocation=None, s_add_memory_pressure=None):
assert (s_n is None or s_n.knowntype == int
or issubclass(s_n.knowntype, base_int))
assert s_T.is_constant()
@@ -2303,7 +2324,8 @@
@analyzer_for(runtime_type_info)
def ann_runtime_type_info(s_p):
- assert isinstance(s_p, SomePtr), "runtime_type_info of non-pointer: %r" % s_p
+ assert isinstance(s_p, SomePtr), \
+ "runtime_type_info of non-pointer: %r" % s_p
return SomePtr(typeOf(runtime_type_info(s_p.ll_ptrtype._example())))
diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py
--- a/rpython/rtyper/module/ll_os_stat.py
+++ b/rpython/rtyper/module/ll_os_stat.py
@@ -186,7 +186,10 @@
_name_struct_stat = '_stati64'
INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h']
else:
- _name_struct_stat = 'stat'
+ if sys.platform.startswith('linux'):
+ _name_struct_stat = 'stat64'
+ else:
+ _name_struct_stat = 'stat'
INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h']
compilation_info = ExternalCompilationInfo(
diff --git a/rpython/tool/jitlogparser/parser.py b/rpython/tool/jitlogparser/parser.py
--- a/rpython/tool/jitlogparser/parser.py
+++ b/rpython/tool/jitlogparser/parser.py
@@ -406,9 +406,9 @@
loops = []
cat = extract_category(log, 'jit-log-opt')
if not cat:
- extract_category(log, 'jit-log-rewritten')
+ cat = extract_category(log, 'jit-log-rewritten')
if not cat:
- extract_category(log, 'jit-log-noopt')
+ cat = extract_category(log, 'jit-log-noopt')
for entry in cat:
parser = ParserCls(entry, None, {}, 'lltype', None,
nonstrict=True)
diff --git a/rpython/translator/backendopt/storesink.py b/rpython/translator/backendopt/storesink.py
--- a/rpython/translator/backendopt/storesink.py
+++ b/rpython/translator/backendopt/storesink.py
@@ -1,6 +1,8 @@
from rpython.rtyper.lltypesystem.lloperation import llop
+from rpython.flowspace.model import mkentrymap, Variable
from rpython.translator.backendopt import removenoops
+from rpython.translator import simplify
def has_side_effects(op):
if op.opname == 'debug_assert':
@@ -10,38 +12,86 @@
except AttributeError:
return True
+
def storesink_graph(graph):
+ """ remove superfluous getfields. use a super-local method: all non-join
+ blocks inherit the heap information from their (single) predecessor
+ """
+ added_some_same_as = False
+ entrymap = mkentrymap(graph)
+ # all merge blocks are starting points
+ todo = [(block, None, None) for (block, prev_blocks) in entrymap.iteritems()
+ if len(prev_blocks) > 1 or block is graph.startblock]
+
+ visited = 0
+
+ while todo:
+ block, cache, inputlink = todo.pop()
+ visited += 1
+ if cache is None:
+ cache = {}
+
+ if block.operations:
+ changed_block = _storesink_block(block, cache, inputlink)
+ added_some_same_as = changed_block or added_some_same_as
+ for link in block.exits:
+ if len(entrymap[link.target]) == 1:
+ new_cache = _translate_cache(cache, link)
+ todo.append((link.target, new_cache, link))
+
+ assert visited == len(entrymap)
+ if added_some_same_as:
+ removenoops.remove_same_as(graph)
+ simplify.transform_dead_op_vars(graph)
+
+def _translate_cache(cache, link):
+ if link.target.operations == (): # exit or except block:
+ return {}
+ block = link.target
+ local_versions = {var1: var2 for var1, var2 in zip(link.args, block.inputargs)}
+ def _translate_arg(arg):
+ if isinstance(arg, Variable):
+ res = local_versions.get(arg, None)
+ if res is None:
+ res = Variable(arg)
+ res.concretetype = arg.concretetype
+ link.args.append(arg)
+ block.inputargs.append(res)
+ local_versions[arg] = res
+ return res
+ else:
+ return arg
+ new_cache = {}
+ for (var, field), res in cache.iteritems():
+ if var in local_versions or not isinstance(var, Variable):
+ new_cache[_translate_arg(var), field] = _translate_arg(res)
+ return new_cache
+
+def _storesink_block(block, cache, inputlink):
def clear_cache_for(cache, concretetype, fieldname):
for k in cache.keys():
if k[0].concretetype == concretetype and k[1] == fieldname:
del cache[k]
added_some_same_as = False
-
- for block in graph.iterblocks():
- newops = []
- cache = {}
- for op in block.operations:
- if op.opname == 'getfield':
- tup = (op.args[0], op.args[1].value)
- res = cache.get(tup, None)
- if res is not None:
- op.opname = 'same_as'
- op.args = [res]
- added_some_same_as = True
- else:
- cache[tup] = op.result
- elif op.opname in ['setarrayitem', 'setinteriorfield']:
- pass
- elif op.opname == 'setfield':
- clear_cache_for(cache, op.args[0].concretetype,
- op.args[1].value)
- elif has_side_effects(op):
- cache = {}
- newops.append(op)
- if block.operations:
- block.operations = newops
-
- if added_some_same_as:
- removenoops.remove_same_as(graph)
+ for op in block.operations:
+ if op.opname == 'getfield':
+ tup = (op.args[0], op.args[1].value)
+ res = cache.get(tup, None)
+ if res is not None:
+ op.opname = 'same_as'
+ op.args = [res]
+ added_some_same_as = True
+ else:
+ cache[tup] = op.result
+ elif op.opname in ('setarrayitem', 'setinteriorfield', "malloc", "malloc_varsize"):
+ pass
+ elif op.opname == 'setfield':
+ target = op.args[0]
+ field = op.args[1].value
+ clear_cache_for(cache, target.concretetype, field)
+ cache[target, field] = op.args[2]
+ elif has_side_effects(op):
+ cache.clear()
+ return added_some_same_as
diff --git a/rpython/translator/backendopt/test/test_storesink.py b/rpython/translator/backendopt/test/test_storesink.py
--- a/rpython/translator/backendopt/test/test_storesink.py
+++ b/rpython/translator/backendopt/test/test_storesink.py
@@ -42,7 +42,7 @@
a.x = i
return a.x
- self.check(f, [int], 1)
+ self.check(f, [int], 0)
def test_simple(self):
class A(object):
@@ -53,7 +53,7 @@
a.x = i
return a.x + a.x
- self.check(f, [int], 1)
+ self.check(f, [int], 0)
def test_irrelevant_setfield(self):
class A(object):
@@ -67,7 +67,7 @@
two = a.x
return one + two
- self.check(f, [int], 1)
+ self.check(f, [int], 0)
def test_relevant_setfield(self):
class A(object):
@@ -101,7 +101,7 @@
two = a.x
return one + two
- self.check(f, [int], 1)
+ self.check(f, [int], 0)
def test_subclass(self):
class A(object):
@@ -119,7 +119,7 @@
two = a.x
return one + two
- self.check(f, [int], 2)
+ self.check(f, [int], 1)
def test_bug_1(self):
class A(object):
@@ -133,4 +133,37 @@
return True
return n
- self.check(f, [int], 1)
+ self.check(f, [int], 0)
+
+
+ def test_cfg_splits(self):
+ class A(object):
+ pass
+
+ def f(i):
+ a = A()
+ j = i
+ for i in range(i):
+ a.x = i
+ if i:
+ j = a.x + a.x
+ else:
+ j = a.x * 5
+ return j
+
+ self.check(f, [int], 0)
+
+ def test_malloc_does_not_invalidate(self):
+ class A(object):
+ pass
+ class B(object):
+ pass
+
+ def f(i):
+ a = A()
+ a.x = i
+ b = B()
+ return a.x
+
+ self.check(f, [int], 0)
+
diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py
--- a/rpython/translator/driver.py
+++ b/rpython/translator/driver.py
@@ -481,6 +481,14 @@
libname = str(newsoname.dirpath().join('python27.lib'))
shutil.copyfile(str(soname.new(ext='lib')), libname)
self.log.info("copied: %s" % (libname,))
+ # XXX TODO : replace the nonsense above with
+ # ext_to_copy = ['lib', 'pdb']
+ ext_to_copy = ['pdb',]
+ for ext in ext_to_copy:
+ name = soname.new(ext=ext)
+ newname = newexename.new(basename=soname.basename)
+ shutil.copyfile(str(name), str(newname.new(ext=ext)))
+ self.log.info("copied: %s" % (newname,))
self.c_entryp = newexename
self.log.info('usession directory: %s' % (udir,))
self.log.info("created: %s" % (self.c_entryp,))
diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py
--- a/rpython/translator/platform/test/test_makefile.py
+++ b/rpython/translator/platform/test/test_makefile.py
@@ -44,6 +44,7 @@
assert res.returncode == 0
def test_900_files(self):
+ tmpdir = udir.join('test_900_files').ensure(dir=1)
txt = '#include <stdio.h>\n'
for i in range(900):
txt += 'int func%03d();\n' % i
@@ -52,11 +53,11 @@
txt += ' j += func%03d();\n' % i
txt += ' printf("%d\\n", j);\n'
txt += ' return 0;};\n'
- cfile = udir.join('test_900_files.c')
+ cfile = tmpdir.join('test_900_files.c')
cfile.write(txt)
cfiles = [cfile]
for i in range(900):
- cfile2 = udir.join('implement%03d.c' %i)
+ cfile2 = tmpdir.join('implement%03d.c' %i)
cfile2.write('''
int func%03d()
{
@@ -64,10 +65,10 @@
}
''' % (i, i))
cfiles.append(cfile2)
- mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir)
+ mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=tmpdir)
mk.write()
self.platform.execute_makefile(mk)
- res = self.platform.execute(udir.join('test_900_files'))
+ res = self.platform.execute(tmpdir.join('test_900_files'))
self.check_res(res, '%d\n' %sum(range(900)))
def test_precompiled_headers(self):
diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py
--- a/rpython/translator/platform/windows.py
+++ b/rpython/translator/platform/windows.py
@@ -204,8 +204,9 @@
# must come first, and after the file name all options are ignored.
# So please be careful with the order of parameters! ;-)
pdb_dir = oname.dirname
- args = ['/nologo', '/c'] + compile_args + ['/Fd%s\\' % (pdb_dir,),
- '/Fo%s' % (oname,), str(cfile)]
+ if pdb_dir:
+ compile_args += ['/Fd%s\\' % (pdb_dir,)]
+ args = ['/nologo', '/c'] + compile_args + ['/Fo%s' % (oname,), str(cfile)]
self._execute_c_compiler(cc, args, oname)
return oname
@@ -347,7 +348,7 @@
'$(CREATE_PCH) $(INCLUDEDIRS)'))
rules.append(('.c.obj', '',
'$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) '
- '/Fd$(@D)\\ /Fo$@ /c $< $(INCLUDEDIRS)'))
+ '/Fo$@ /c $< $(INCLUDEDIRS)'))
#Do not use precompiled headers for some files
#rules.append((r'{..\module_cache}.c{..\module_cache}.obj', '',
# '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)'))
@@ -362,13 +363,12 @@
target = f[:-1] + 'obj'
rules.append((target, f,
'$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) '
- '/Fd%s\\ /Fo%s /c %s $(INCLUDEDIRS)' %(
- os.path.dirname(target), target, f)))
+ '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f)))
else:
rules.append(('.c.obj', '',
'$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) '
- '/Fd$(@D)\\ /Fo$@ /c $< $(INCLUDEDIRS)'))
+ '/Fo$@ /c $< $(INCLUDEDIRS)'))
for args in definitions:
@@ -410,7 +410,7 @@
'int main(int argc, char* argv[]) '
'{ return $(PYPY_MAIN_FUNCTION)(argc, argv); } > $@')
m.rule('$(DEFAULT_TARGET)', ['$(TARGET)', 'main.obj'],
- ['$(CC_LINK) /nologo main.obj $(SHARED_IMPORT_LIB) /out:$@ /MANIFEST /MANIFESTFILE:$*.manifest',
+ ['$(CC_LINK) /nologo /debug main.obj $(SHARED_IMPORT_LIB) /out:$@ /MANIFEST /MANIFESTFILE:$*.manifest',
'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1',
])
m.rule('debugmode_$(DEFAULT_TARGET)', ['debugmode_$(TARGET)', 'main.obj'],
diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py
--- a/rpython/translator/sandbox/rsandbox.py
+++ b/rpython/translator/sandbox/rsandbox.py
@@ -60,8 +60,7 @@
def need_more_data(self):
buflen = self.buflen
- buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw')
- try:
+ with lltype.scoped_alloc(rffi.CCHARP.TO, buflen) as buf:
buflen = rffi.cast(rffi.SIZE_T, buflen)
count = ll_read_not_sandboxed(self.fd, buf, buflen)
count = rffi.cast(lltype.Signed, count)
@@ -69,20 +68,15 @@
raise IOError
self.buf += ''.join([buf[i] for i in range(count)])
self.buflen *= 2
- finally:
- lltype.free(buf, flavor='raw')
def sandboxed_io(buf):
STDIN = 0
STDOUT = 1
# send the buffer with the marshalled fnname and input arguments to STDOUT
- p = lltype.malloc(rffi.CCHARP.TO, len(buf), flavor='raw')
- try:
+ with lltype.scoped_alloc(rffi.CCHARP.TO, len(buf)) as p:
for i in range(len(buf)):
p[i] = buf[i]
writeall_not_sandboxed(STDOUT, p, len(buf))
- finally:
- lltype.free(p, flavor='raw')
# build a Loader that will get the answer from STDIN
loader = FdLoader(STDIN)
# check for errors
@@ -108,9 +102,8 @@
@signature(types.str(), returns=types.impossible())
def not_implemented_stub(msg):
STDERR = 2
- buf = rffi.str2charp(msg + '\n')
- writeall_not_sandboxed(STDERR, buf, len(msg) + 1)
- rffi.free_charp(buf)
+ with rffi.scoped_str2charp(msg + '\n') as buf:
+ writeall_not_sandboxed(STDERR, buf, len(msg) + 1)
raise RuntimeError(msg) # XXX in RPython, the msg is ignored at the moment
dump_string = rmarshal.get_marshaller(str)
diff --git a/rpython/translator/sandbox/sandlib.py b/rpython/translator/sandbox/sandlib.py
--- a/rpython/translator/sandbox/sandlib.py
+++ b/rpython/translator/sandbox/sandlib.py
@@ -459,6 +459,15 @@
do_ll_os__ll_os_lstat = do_ll_os__ll_os_stat
+ def do_ll_os__ll_os_access(self, vpathname, mode):
+ try:
+ node = self.get_node(vpathname)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ return False
+ raise
+ return node.access(mode)
+
def do_ll_os__ll_os_isatty(self, fd):
return self.virtual_console_isatty and fd in (0, 1, 2)
diff --git a/rpython/translator/sandbox/test/test_vfs.py b/rpython/translator/sandbox/test/test_vfs.py
--- a/rpython/translator/sandbox/test/test_vfs.py
+++ b/rpython/translator/sandbox/test/test_vfs.py
@@ -33,6 +33,8 @@
py.test.raises(OSError, d.join, 'bar')
st = d.stat()
assert stat.S_ISDIR(st.st_mode)
+ assert d.access(os.R_OK | os.X_OK)
+ assert not d.access(os.W_OK)
def test_file():
f = File('hello world')
@@ -46,6 +48,8 @@
st = f.stat()
assert stat.S_ISREG(st.st_mode)
assert st.st_size == 11
+ assert f.access(os.R_OK)
+ assert not f.access(os.W_OK)
def test_realdir_realfile():
for show_dotfiles in [False, True]:
@@ -78,6 +82,7 @@
f = v_test_vfs.join('symlink2')
assert stat.S_ISREG(f.stat().st_mode)
+ assert f.access(os.R_OK)
assert f.open().read() == 'secret'
else:
py.test.raises(OSError, v_test_vfs.join, 'symlink1')
diff --git a/rpython/translator/sandbox/vfs.py b/rpython/translator/sandbox/vfs.py
--- a/rpython/translator/sandbox/vfs.py
+++ b/rpython/translator/sandbox/vfs.py
@@ -22,7 +22,7 @@
st_size = self.getsize()
st_mode = self.kind
st_mode |= stat.S_IWUSR | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
- if self.kind == stat.S_IFDIR:
+ if stat.S_ISDIR(self.kind):
st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if self.read_only:
st_uid = 0 # read-only files are virtually owned by root
@@ -37,6 +37,15 @@
(st_mode, st_ino, st_dev, st_nlink, st_uid, st_gid,
st_size, st_atime, st_mtime, st_ctime))
+ def access(self, mode):
+ s = self.stat()
+ e_mode = s.st_mode & stat.S_IRWXO
+ if UID == s.st_uid:
+ e_mode |= (s.st_mode & stat.S_IRWXU) >> 6
+ if GID == s.st_gid:
+ e_mode |= (s.st_mode & stat.S_IRWXG) >> 3
+ return (e_mode & mode) == mode
+
def keys(self):
raise OSError(errno.ENOTDIR, self)
@@ -114,8 +123,9 @@
return cStringIO.StringIO(self.data)
class RealFile(File):
- def __init__(self, path):
+ def __init__(self, path, mode=0):
self.path = path
+ self.kind |= mode
def __repr__(self):
return '<RealFile %s>' % (self.path,)
def getsize(self):
diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py
--- a/rpython/translator/test/test_driver.py
+++ b/rpython/translator/test/test_driver.py
@@ -55,12 +55,15 @@
src_name = udir.join('src/dydy2.exe')
dll_name = udir.join('src/pypy.dll')
lib_name = udir.join('src/pypy.lib')
+ pdb_name = udir.join('src/pypy.pdb')
src_name.ensure()
src_name.write('exe')
dll_name.ensure()
dll_name.write('dll')
lib_name.ensure()
lib_name.write('lib')
+ pdb_name.ensure()
+ pdb_name.write('pdb')
dst_name.ensure()
class CBuilder(object):
@@ -75,6 +78,8 @@
assert dst_name.new(purebasename='python27',ext='lib').read() == 'lib'
def test_shutil_copy():
+ if os.name == 'nt':
+ py.test.skip('Windows cannot copy or rename to an in-use file')
a = udir.join('file_a')
b = udir.join('file_a')
a.write('hello')
More information about the pypy-commit
mailing list